Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r-- deps/v8/src/compiler/OWNERS | 1
-rw-r--r-- deps/v8/src/compiler/access-builder.cc | 549
-rw-r--r-- deps/v8/src/compiler/access-builder.h | 24
-rw-r--r-- deps/v8/src/compiler/access-info.cc | 48
-rw-r--r-- deps/v8/src/compiler/access-info.h | 3
-rw-r--r-- deps/v8/src/compiler/arm/code-generator-arm.cc | 276
-rw-r--r-- deps/v8/src/compiler/arm/instruction-codes-arm.h | 23
-rw-r--r-- deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 21
-rw-r--r-- deps/v8/src/compiler/arm/instruction-selector-arm.cc | 180
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc | 66
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 44
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.cc | 1361
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.h | 61
-rw-r--r-- deps/v8/src/compiler/ast-loop-assignment-analyzer.cc | 2
-rw-r--r-- deps/v8/src/compiler/branch-elimination.cc | 21
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.cc | 622
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.h | 126
-rw-r--r-- deps/v8/src/compiler/bytecode-branch-analysis.cc | 43
-rw-r--r-- deps/v8/src/compiler/bytecode-branch-analysis.h | 65
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc | 541
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.h | 57
-rw-r--r-- deps/v8/src/compiler/bytecode-liveness-map.cc | 42
-rw-r--r-- deps/v8/src/compiler/bytecode-liveness-map.h | 119
-rw-r--r-- deps/v8/src/compiler/bytecode-loop-analysis.cc | 100
-rw-r--r-- deps/v8/src/compiler/bytecode-loop-analysis.h | 67
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc | 1051
-rw-r--r-- deps/v8/src/compiler/code-assembler.h | 369
-rw-r--r-- deps/v8/src/compiler/code-generator.cc | 162
-rw-r--r-- deps/v8/src/compiler/code-generator.h | 24
-rw-r--r-- deps/v8/src/compiler/common-operator-reducer.cc | 69
-rw-r--r-- deps/v8/src/compiler/common-operator.cc | 301
-rw-r--r-- deps/v8/src/compiler/common-operator.h | 136
-rw-r--r-- deps/v8/src/compiler/control-builders.cc | 61
-rw-r--r-- deps/v8/src/compiler/control-builders.h | 53
-rw-r--r-- deps/v8/src/compiler/dead-code-elimination.cc | 31
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc | 3526
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h | 244
-rw-r--r-- deps/v8/src/compiler/escape-analysis-reducer.cc | 42
-rw-r--r-- deps/v8/src/compiler/escape-analysis-reducer.h | 1
-rw-r--r-- deps/v8/src/compiler/escape-analysis.cc | 74
-rw-r--r-- deps/v8/src/compiler/escape-analysis.h | 2
-rw-r--r-- deps/v8/src/compiler/frame-elider.cc | 35
-rw-r--r-- deps/v8/src/compiler/frame-states.cc | 1
-rw-r--r-- deps/v8/src/compiler/frame.h | 32
-rw-r--r-- deps/v8/src/compiler/graph-assembler.cc | 287
-rw-r--r-- deps/v8/src/compiler/graph-assembler.h | 449
-rw-r--r-- deps/v8/src/compiler/graph-reducer.cc | 43
-rw-r--r-- deps/v8/src/compiler/graph-visualizer.cc | 6
-rw-r--r-- deps/v8/src/compiler/ia32/code-generator-ia32.cc | 196
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 94
-rw-r--r-- deps/v8/src/compiler/instruction-codes.h | 9
-rw-r--r-- deps/v8/src/compiler/instruction-selector-impl.h | 23
-rw-r--r-- deps/v8/src/compiler/instruction-selector.cc | 261
-rw-r--r-- deps/v8/src/compiler/instruction-selector.h | 20
-rw-r--r-- deps/v8/src/compiler/instruction.cc | 16
-rw-r--r-- deps/v8/src/compiler/instruction.h | 146
-rw-r--r-- deps/v8/src/compiler/int64-lowering.cc | 10
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.cc | 310
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.h | 3
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc | 200
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.h | 13
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.cc | 116
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.h | 6
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc | 227
-rw-r--r-- deps/v8/src/compiler/js-frame-specialization.cc | 3
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc | 207
-rw-r--r-- deps/v8/src/compiler/js-global-object-specialization.cc | 61
-rw-r--r-- deps/v8/src/compiler/js-graph.cc | 26
-rw-r--r-- deps/v8/src/compiler/js-graph.h | 6
-rw-r--r-- deps/v8/src/compiler/js-inlining-heuristic.cc | 4
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc | 87
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.cc | 31
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.h | 3
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc | 452
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.h | 7
-rw-r--r-- deps/v8/src/compiler/js-operator.cc | 196
-rw-r--r-- deps/v8/src/compiler/js-operator.h | 90
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc | 178
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.h | 3
-rw-r--r-- deps/v8/src/compiler/linkage.cc | 3
-rw-r--r-- deps/v8/src/compiler/load-elimination.cc | 270
-rw-r--r-- deps/v8/src/compiler/load-elimination.h | 55
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.cc | 194
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.h | 3
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.cc | 80
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.h | 5
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc | 166
-rw-r--r-- deps/v8/src/compiler/machine-operator.h | 14
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.cc | 185
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.h | 3
-rw-r--r-- deps/v8/src/compiler/mips/code-generator-mips.cc | 223
-rw-r--r-- deps/v8/src/compiler/mips/instruction-codes-mips.h | 4
-rw-r--r-- deps/v8/src/compiler/mips/instruction-selector-mips.cc | 123
-rw-r--r-- deps/v8/src/compiler/mips64/code-generator-mips64.cc | 383
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 4
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 202
-rw-r--r-- deps/v8/src/compiler/node-marker.h | 13
-rw-r--r-- deps/v8/src/compiler/node-properties.cc | 14
-rw-r--r-- deps/v8/src/compiler/node-properties.h | 5
-rw-r--r-- deps/v8/src/compiler/node.cc | 6
-rw-r--r-- deps/v8/src/compiler/node.h | 167
-rw-r--r-- deps/v8/src/compiler/opcodes.h | 67
-rw-r--r-- deps/v8/src/compiler/operation-typer.cc | 84
-rw-r--r-- deps/v8/src/compiler/operator-properties.cc | 3
-rw-r--r-- deps/v8/src/compiler/osr.cc | 23
-rw-r--r-- deps/v8/src/compiler/pipeline.cc | 329
-rw-r--r-- deps/v8/src/compiler/pipeline.h | 21
-rw-r--r-- deps/v8/src/compiler/ppc/code-generator-ppc.cc | 210
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 5
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc | 3
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 40
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.cc | 318
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h | 64
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.cc | 58
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.h | 3
-rw-r--r-- deps/v8/src/compiler/register-allocator-verifier.cc | 32
-rw-r--r-- deps/v8/src/compiler/register-allocator.cc | 41
-rw-r--r-- deps/v8/src/compiler/representation-change.cc | 65
-rw-r--r-- deps/v8/src/compiler/representation-change.h | 1
-rw-r--r-- deps/v8/src/compiler/s390/code-generator-s390.cc | 176
-rw-r--r-- deps/v8/src/compiler/s390/instruction-codes-s390.h | 1
-rw-r--r-- deps/v8/src/compiler/s390/instruction-scheduler-s390.cc | 1
-rw-r--r-- deps/v8/src/compiler/s390/instruction-selector-s390.cc | 139
-rw-r--r-- deps/v8/src/compiler/schedule.cc | 2
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.cc | 254
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.h | 7
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc | 185
-rw-r--r-- deps/v8/src/compiler/simplified-operator-reducer.cc | 9
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc | 116
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h | 76
-rw-r--r-- deps/v8/src/compiler/state-values-utils.cc | 323
-rw-r--r-- deps/v8/src/compiler/state-values-utils.h | 49
-rw-r--r-- deps/v8/src/compiler/type-cache.h | 16
-rw-r--r-- deps/v8/src/compiler/type-hint-analyzer.cc | 128
-rw-r--r-- deps/v8/src/compiler/type-hint-analyzer.h | 57
-rw-r--r-- deps/v8/src/compiler/typed-optimization.cc | 37
-rw-r--r-- deps/v8/src/compiler/typed-optimization.h | 1
-rw-r--r-- deps/v8/src/compiler/typer.cc | 105
-rw-r--r-- deps/v8/src/compiler/types.cc | 27
-rw-r--r-- deps/v8/src/compiler/types.h | 29
-rw-r--r-- deps/v8/src/compiler/value-numbering-reducer.cc | 19
-rw-r--r-- deps/v8/src/compiler/verifier.cc | 59
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc | 858
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h | 81
-rw-r--r-- deps/v8/src/compiler/wasm-linkage.cc | 28
-rw-r--r-- deps/v8/src/compiler/x64/code-generator-x64.cc | 311
-rw-r--r-- deps/v8/src/compiler/x64/instruction-codes-x64.h | 8
-rw-r--r-- deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 4
-rw-r--r-- deps/v8/src/compiler/x64/instruction-selector-x64.cc | 266
-rw-r--r-- deps/v8/src/compiler/x87/code-generator-x87.cc | 8
-rw-r--r-- deps/v8/src/compiler/x87/instruction-selector-x87.cc | 14
151 files changed, 12154 insertions, 9194 deletions
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 02de4edeac..10ffcb0f1a 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -6,3 +6,4 @@ jarin@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
titzer@chromium.org
+danno@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 540eb375b7..9fd531c637 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -9,6 +9,7 @@
#include "src/frames.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -16,47 +17,67 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForExternalDoubleValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), Type::Number(),
- MachineType::Float64(), kNoWriteBarrier};
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Number(), MachineType::Float64(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalTaggedValue() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalUint8Value() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMap() {
- FieldAccess access = {
- kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
- Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
+ FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kMapWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForHeapNumberValue() {
- FieldAccess access = {kTaggedBase,
- HeapNumber::kValueOffset,
- MaybeHandle<Name>(),
- TypeCache::Get().kFloat64,
- MachineType::Float64(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, HeapNumber::kValueOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kFloat64, MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {
- kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {
- kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -65,126 +86,127 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
int index) {
int const offset = map->GetInObjectPropertyOffset(index);
- FieldAccess access = {kTaggedBase,
- offset,
- MaybeHandle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForJSObjectOffset(
+ int offset, WriteBarrierKind write_barrier_kind) {
+ FieldAccess access = {kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ write_barrier_kind};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kPrototypeOrInitialMapOffset,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionLiterals() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kLiteralsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kCodeEntryOffset, Handle<Name>(),
- Type::OtherInternal(), MachineType::Pointer(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kCodeEntryOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kNextFunctionLinkOffset,
- Handle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kNextFunctionLinkOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kContextOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kContinuationOffset,
- Handle<Name>(),
- Type::SignedSmall(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kContinuationOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kInputOrDebugPosOffset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kInputOrDebugPosOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kOperandStackOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kRegisterFileOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kResumeModeOffset,
- Handle<Name>(),
- Type::SignedSmall(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kResumeModeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -194,6 +216,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
FieldAccess access = {kTaggedBase,
JSArray::kLengthOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
type_cache.kJSArrayLengthType,
MachineType::TaggedSigned(),
kFullWriteBarrier};
@@ -210,30 +233,28 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- FieldAccess access = {kTaggedBase,
- JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
- MaybeHandle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint32(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayBuffer::kBitFieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
- FieldAccess access = {kTaggedBase,
- JSArrayBufferView::kBufferOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -243,6 +264,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kByteLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -254,6 +276,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kByteOffsetOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -265,6 +288,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayLength() {
FieldAccess access = {kTaggedBase,
JSTypedArray::kLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kJSTypedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -276,6 +300,7 @@ FieldAccess AccessBuilder::ForJSDateValue() {
FieldAccess access = {kTaggedBase,
JSDate::kValueOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kJSDateValueType,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -284,48 +309,51 @@ FieldAccess AccessBuilder::ForJSDateValue() {
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {kTaggedBase,
- JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(),
- Type::Number(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Number(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
- FieldAccess access = {
- kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
- FieldAccess access = {
- kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
- FieldAccess access = {
- kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
- FieldAccess access = {
- kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -335,6 +363,7 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
FieldAccess access = {kTaggedBase,
FixedArray::kLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -343,12 +372,11 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
// static
FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
- FieldAccess access = {kTaggedBase,
- FixedTypedArrayBase::kBasePointerOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, FixedTypedArrayBase::kBasePointerOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -357,6 +385,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
FieldAccess access = {kTaggedBase,
FixedTypedArrayBase::kExternalPointerOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
@@ -365,53 +394,51 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheBridgeCacheOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField() {
- FieldAccess access = {kTaggedBase, Map::kBitFieldOffset,
- Handle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint8(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
- FieldAccess access = {kTaggedBase, Map::kBitField3Offset,
- Handle<Name>(), TypeCache::Get().kInt32,
- MachineType::Int32(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::Int32(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {kTaggedBase,
- Map::kDescriptorsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -419,48 +446,47 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset,
- Handle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint8(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapPrototype() {
- FieldAccess access = {
- kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, Map::kPrototypeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
- FieldAccess access = {kTaggedBase,
- Module::kRegularExportsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Module::kRegularExportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
- FieldAccess access = {kTaggedBase,
- Module::kRegularImportsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Module::kRegularImportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForNameHashField() {
- FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
- Handle<Name>(), Type::Internal(),
- MachineType::Uint32(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
@@ -469,6 +495,7 @@ FieldAccess AccessBuilder::ForStringLength() {
FieldAccess access = {kTaggedBase,
String::kLengthOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kStringLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -477,33 +504,37 @@ FieldAccess AccessBuilder::ForStringLength() {
// static
FieldAccess AccessBuilder::ForConsStringFirst() {
- FieldAccess access = {
- kTaggedBase, ConsString::kFirstOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, ConsString::kFirstOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForConsStringSecond() {
- FieldAccess access = {
- kTaggedBase, ConsString::kSecondOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, ConsString::kSecondOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringOffset() {
- FieldAccess access = {
- kTaggedBase, SlicedString::kOffsetOffset, Handle<Name>(),
- Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, SlicedString::kOffsetOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringParent() {
- FieldAccess access = {
- kTaggedBase, SlicedString::kParentOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, SlicedString::kParentOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -512,6 +543,7 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
FieldAccess access = {kTaggedBase,
ExternalString::kResourceDataOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
@@ -550,23 +582,20 @@ ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
// static
FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
- FieldAccess access = {kTaggedBase,
- JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(),
- Type::Receiver(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Receiver(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
- FieldAccess access = {kTaggedBase,
- JSGlobalObject::kNativeContextOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -575,6 +604,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
FieldAccess access = {kTaggedBase,
JSArrayIterator::kIteratedObjectOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
Type::ReceiverOrUndefined(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -589,6 +619,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
FieldAccess access = {kTaggedBase,
JSArrayIterator::kNextIndexOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveSafeInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -614,20 +645,20 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
// static
FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
- FieldAccess access = {kTaggedBase,
- JSArrayIterator::kIteratedObjectMapOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayIterator::kIteratedObjectMapOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSStringIteratorString() {
- FieldAccess access = {
- kTaggedBase, JSStringIterator::kStringOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSStringIterator::kStringOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -636,6 +667,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
FieldAccess access = {kTaggedBase,
JSStringIterator::kNextIndexOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kStringLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -644,52 +676,53 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
// static
FieldAccess AccessBuilder::ForValue() {
- FieldAccess access = {
- kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSValue::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- FieldAccess access = {
- kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
- FieldAccess access = {kTaggedBase,
- JSSloppyArgumentsObject::kCalleeOffset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
+FieldAccess AccessBuilder::ForFixedArraySlot(
+ size_t index, WriteBarrierKind write_barrier_kind) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase,
- offset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ write_barrier_kind};
return access;
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {
- kTaggedBase, Cell::kValueOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -698,31 +731,29 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- FieldAccess access = {kTaggedBase,
- offset,
- Handle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
- FieldAccess access = {kTaggedBase,
- ContextExtension::kScopeInfoOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, ContextExtension::kScopeInfoOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForContextExtensionExtension() {
- FieldAccess access = {
- kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, ContextExtension::kExtensionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -831,6 +862,68 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
return access;
}
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfElements() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(HashTableBase::kNumberOfElementsIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfDeletedElement() {
+ FieldAccess access = {
+ kTaggedBase, FixedArray::OffsetOfElementAt(
+ HashTableBase::kNumberOfDeletedElementsIndex),
+ MaybeHandle<Name>(), MaybeHandle<Map>(), Type::SignedSmall(),
+ MachineType::TaggedSigned(), kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(HashTableBase::kCapacityIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(NameDictionary::kNextEnumerationIndexIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index eb8e78fc36..f76aedf5a9 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -26,6 +26,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to a double field identified by an external reference.
static FieldAccess ForExternalDoubleValue();
+ // Provides access to a tagged field identified by an external reference.
+ static FieldAccess ForExternalTaggedValue();
+
+ // Provides access to an uint8 field identified by an external reference.
+ static FieldAccess ForExternalUint8Value();
+
// ===========================================================================
// Access to heap object fields and elements (based on tagged pointer).
@@ -43,6 +49,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSObject inobject property fields.
static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+ static FieldAccess ForJSObjectOffset(
+ int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -71,8 +79,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
- // Provides access to JSGeneratorObject::operand_stack() field.
- static FieldAccess ForJSGeneratorObjectOperandStack();
+ // Provides access to JSGeneratorObject::register_file() field.
+ static FieldAccess ForJSGeneratorObjectRegisterFile();
// Provides access to JSGeneratorObject::resume_mode() field.
static FieldAccess ForJSGeneratorObjectResumeMode();
@@ -218,7 +226,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForArgumentsCallee();
// Provides access to FixedArray slots.
- static FieldAccess ForFixedArraySlot(size_t index);
+ static FieldAccess ForFixedArraySlot(
+ size_t index, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
// Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
@@ -238,6 +247,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
+ // Provides access to HashTable fields.
+ static FieldAccess ForHashTableBaseNumberOfElements();
+ static FieldAccess ForHashTableBaseNumberOfDeletedElement();
+ static FieldAccess ForHashTableBaseCapacity();
+
+ // Provides access to Dictionary fields.
+ static FieldAccess ForDictionaryMaxNumberKey();
+ static FieldAccess ForDictionaryNextEnumerationIndex();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 866b06086a..f23154aa45 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -52,6 +52,8 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os << "Load";
case AccessMode::kStore:
return os << "Store";
+ case AccessMode::kStoreInLiteral:
+ return os << "StoreInLiteral";
}
UNREACHABLE();
return os;
@@ -144,13 +146,11 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
case kInvalid:
break;
- case kNotFound:
- return true;
-
case kDataField: {
// Check if we actually access the same field.
if (this->transition_map_.address() == that->transition_map_.address() &&
this->field_index_ == that->field_index_ &&
+ this->field_map_.address() == that->field_map_.address() &&
this->field_type_->Is(that->field_type_) &&
that->field_type_->Is(this->field_type_) &&
this->field_representation_ == that->field_representation_) {
@@ -173,6 +173,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
}
return false;
}
+
+ case kNotFound:
case kGeneric: {
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
@@ -282,7 +284,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
int const number = descriptors->SearchWithCache(isolate(), *name, *map);
if (number != DescriptorArray::kNotFound) {
PropertyDetails const details = descriptors->GetDetails(number);
- if (access_mode == AccessMode::kStore) {
+ if (access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral) {
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
return false;
@@ -295,14 +298,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return LookupTransition(receiver_map, name, holder, access_info);
}
}
- switch (details.type()) {
- case DATA_CONSTANT: {
- *access_info = PropertyAccessInfo::DataConstant(
- MapList{receiver_map},
- handle(descriptors->GetValue(number), isolate()), holder);
- return true;
- }
- case DATA: {
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
int index = descriptors->GetFieldIndex(number);
Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -344,8 +341,21 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
MapList{receiver_map}, field_index, field_representation,
field_type, field_map, holder);
return true;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ // TODO(turbofan): Add support for general accessors?
+ return false;
}
- case ACCESSOR_CONSTANT: {
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ *access_info = PropertyAccessInfo::DataConstant(
+ MapList{receiver_map},
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
Handle<Object> accessors(descriptors->GetValue(number), isolate());
if (!accessors->IsAccessorPair()) return false;
Handle<Object> accessor(
@@ -361,15 +371,12 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (optimization.api_call_info()->fast_handler()->IsCode()) {
return false;
}
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
*access_info = PropertyAccessInfo::AccessorConstant(
MapList{receiver_map}, accessor, holder);
return true;
}
- case ACCESSOR: {
- // TODO(turbofan): Add support for general accessors?
- return false;
- }
}
UNREACHABLE();
return false;
@@ -382,6 +389,11 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
+ // Don't search on the prototype when storing in literals
+ if (access_mode == AccessMode::kStoreInLiteral) {
+ return false;
+ }
+
// Don't lookup private symbols on the prototype chain.
if (name->IsPrivate()) return false;
@@ -503,7 +515,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) return false;
// TODO(bmeurer): Handle transition to data constant?
- if (details.type() != DATA) return false;
+ if (details.location() != kField) return false;
int const index = details.field_index();
Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 1d485dd0d4..e301ad9890 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -26,7 +26,8 @@ class Type;
class TypeCache;
// Whether we are loading a property or storing to a property.
-enum class AccessMode { kLoad, kStore };
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral };
std::ostream& operator<<(std::ostream&, AccessMode);
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index c473b9b6aa..a721f6a3be 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -32,6 +32,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case kFlags_branch:
case kFlags_deoptimize:
case kFlags_set:
+ case kFlags_trap:
return SetCC;
case kFlags_none:
return LeaveCC;
@@ -1504,6 +1505,110 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmFloat32x4Splat: {
+ __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+ break;
+ }
+ case kArmFloat32x4ExtractLane: {
+ __ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+ kScratchReg, i.InputInt8(1));
+ break;
+ }
+ case kArmFloat32x4ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+ break;
+ }
+ case kArmFloat32x4FromInt32x4: {
+ __ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4FromUint32x4: {
+ __ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Abs: {
+ __ vabs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Neg: {
+ __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Add: {
+ __ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Sub: {
+ __ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Eq: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Ne: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmInt32x4Splat: {
+ __ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kArmInt32x4ExtractLane: {
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
+ i.InputInt8(1));
+ break;
+ }
+ case kArmInt32x4ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(2), NeonS32, i.InputInt8(1));
+ break;
+ }
+ case kArmInt32x4FromFloat32x4: {
+ __ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmUint32x4FromFloat32x4: {
+ __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmInt32x4Add: {
+ __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Sub: {
+ __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Eq: {
+ __ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Ne: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(Neon32, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmSimd32x4Select: {
+ // Select is a ternary op, so we need to move one input into the
+ // destination. Use vtst to canonicalize the 'boolean' input #0.
+ __ vtst(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
@@ -1590,6 +1695,67 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ ArmOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Condition cc = FlagsConditionToCondition(condition);
+ __ b(cc, tlabel);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1824,9 +1990,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ mov(dst, Operand(src.ToInt32()));
@@ -1891,8 +2055,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsDoubleStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
// GapResolver may give us reg codes that don't map to actual s-registers.
// Generate code to work around those cases.
int src_code = LocationOperand::cast(source)->register_code();
@@ -1903,6 +2066,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsFloatStackSlot());
__ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
}
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(src.low(), 2),
+ NeonMemOperand(kScratchReg));
+ }
}
} else if (source->IsFPStackSlot()) {
MemOperand src = g.ToMemOperand(source);
@@ -1911,24 +2087,38 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src, kScratchReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ add(kScratchReg, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+ NeonMemOperand(kScratchReg));
}
- } else {
+ } else if (rep == MachineRepresentation::kFloat64) {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
SwVfpRegister temp = kScratchDoubleReg.low();
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ add(kScratchReg, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else {
@@ -1936,7 +2126,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
@@ -1975,7 +2164,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ vswp(src, dst);
+ __ Swap(src, dst);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
@@ -1983,8 +2172,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vldr(src, dst);
__ vstr(temp, dst);
}
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFPRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
@@ -1998,29 +2186,55 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ VmovExtended(src_code, dst, kScratchReg);
__ vstr(temp.low(), dst);
}
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister src = g.ToSimd128Register(source);
+ if (destination->IsFPRegister()) {
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ Swap(src, dst);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(kScratchQuadReg, src);
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vld1(Neon8, NeonListOperand(src.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
- Register temp_0 = kScratchReg;
- LowDwVfpRegister temp_1 = kScratchDoubleReg;
- MemOperand src0 = g.ToMemOperand(source);
- MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ // Restore the 0 register.
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ vldr(kScratchDoubleReg.low(), dst);
+ __ vldr(kScratchDoubleReg.high(), src);
+ __ vstr(kScratchDoubleReg.low(), src);
+ __ vstr(kScratchDoubleReg.high(), dst);
} else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
- __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ vstr(temp_1.low(), src0);
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ src.set_offset(src.offset() + kDoubleSize);
+ dst.set_offset(dst.offset() + kDoubleSize);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ // Restore the 0 register.
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
} else {
// No other combinations are possible.
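The kSimd128 stack-to-stack paths above share one trick worth making explicit: with only two scratch D-registers available (kScratchDoubleReg and kDoubleRegZero), a 16-byte slot is moved or swapped as two 8-byte halves, borrowing the zero register as a second scratch and re-zeroing it with veor afterwards. A minimal C++ sketch of the swap, mirroring the vldr/vstr pairs (illustrative only, not V8 code):
#include <cstdint>
#include <cstring>
void SwapSimd128Slots(uint8_t* src, uint8_t* dst) {
  for (int half = 0; half < 2; ++half) {
    uint64_t scratch;   // plays kScratchDoubleReg
    uint64_t zero_reg;  // plays kDoubleRegZero, borrowed as a scratch
    std::memcpy(&scratch, dst, 8);   // vldr kScratchDoubleReg, dst
    std::memcpy(&zero_reg, src, 8);  // vldr kDoubleRegZero, src
    std::memcpy(src, &scratch, 8);   // vstr kScratchDoubleReg, src
    std::memcpy(dst, &zero_reg, 8);  // vstr kDoubleRegZero, dst
    src += 8;  // src.set_offset(src.offset() + kDoubleSize)
    dst += 8;
  }
  // The generated code must then re-zero the borrowed register:
  // veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero).
}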
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 07c4033bd6..6e5426c255 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -119,7 +119,28 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
- V(ArmPoke)
+ V(ArmPoke) \
+ V(ArmFloat32x4Splat) \
+ V(ArmFloat32x4ExtractLane) \
+ V(ArmFloat32x4ReplaceLane) \
+ V(ArmFloat32x4FromInt32x4) \
+ V(ArmFloat32x4FromUint32x4) \
+ V(ArmFloat32x4Abs) \
+ V(ArmFloat32x4Neg) \
+ V(ArmFloat32x4Add) \
+ V(ArmFloat32x4Sub) \
+ V(ArmFloat32x4Eq) \
+ V(ArmFloat32x4Ne) \
+ V(ArmInt32x4Splat) \
+ V(ArmInt32x4ExtractLane) \
+ V(ArmInt32x4ReplaceLane) \
+ V(ArmInt32x4FromFloat32x4) \
+ V(ArmUint32x4FromFloat32x4) \
+ V(ArmInt32x4Add) \
+ V(ArmInt32x4Sub) \
+ V(ArmInt32x4Eq) \
+ V(ArmInt32x4Ne) \
+ V(ArmSimd32x4Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
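The V(...) entries above form an X-macro list: the opcodes are written once and expanded several times with different definitions of V (enum values, name strings, scheduler cases). A self-contained sketch of the pattern with a shortened list (the real list is TARGET_ARCH_OPCODE_LIST):
#define DEMO_OPCODE_LIST(V) \
  V(ArmFloat32x4Add)        \
  V(ArmInt32x4Add)          \
  V(ArmSimd32x4Select)
enum DemoArchOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};
// A second expansion yields printable names, in sync by construction.
const char* const kDemoOpcodeNames[] = {
#define DECLARE_NAME(Name) #Name,
    DEMO_OPCODE_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};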
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 3f38e5ddef..8dfa68a2f6 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -108,6 +108,27 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32Min:
case kArmFloat64Min:
case kArmFloat64SilenceNaN:
+ case kArmFloat32x4Splat:
+ case kArmFloat32x4ExtractLane:
+ case kArmFloat32x4ReplaceLane:
+ case kArmFloat32x4FromInt32x4:
+ case kArmFloat32x4FromUint32x4:
+ case kArmFloat32x4Abs:
+ case kArmFloat32x4Neg:
+ case kArmFloat32x4Add:
+ case kArmFloat32x4Sub:
+ case kArmFloat32x4Eq:
+ case kArmFloat32x4Ne:
+ case kArmInt32x4Splat:
+ case kArmInt32x4ExtractLane:
+ case kArmInt32x4ReplaceLane:
+ case kArmInt32x4FromFloat32x4:
+ case kArmUint32x4FromFloat32x4:
+ case kArmInt32x4Add:
+ case kArmInt32x4Sub:
+ case kArmInt32x4Eq:
+ case kArmInt32x4Ne:
+ case kArmSimd32x4Select:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 5279d1eec1..def486af62 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -267,6 +267,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -501,6 +504,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitUnalignedLoad(Node* node) {
UnalignedLoadRepresentation load_rep =
UnalignedLoadRepresentationOf(node->op());
@@ -885,6 +893,9 @@ void VisitShift(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1252,10 +1263,14 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
+ } else {
+ DCHECK(cont->IsTrap());
+ InstructionOperand in[] = {temp_operand, result_operand, shift_31,
+ g.UseImmediate(cont->trap_id())};
+ selector->Emit(opcode, 0, nullptr, 4, in);
}
}
@@ -1643,9 +1658,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1836,6 +1854,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1992,10 +2013,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2019,6 +2043,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
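VisitTrapIf and VisitTrapUnless differ only in the condition handed to FlagsContinuation::ForTrap: kNotEqual traps on a non-zero condition value, kEqual on a zero one (the failed-check case, e.g. a wasm bounds check). A stand-in C++ rendering of the intended semantics (hypothetical Trap helper, not V8 code):
#include <cstdint>
#include <cstdlib>
[[noreturn]] void Trap(int /*trap_id*/) { std::abort(); }  // stand-in stub
void TrapIf(uint32_t cond, int trap_id) {
  if (cond != 0) Trap(trap_id);  // kNotEqual continuation
}
void TrapUnless(uint32_t cond, int trap_id) {
  if (cond == 0) Trap(trap_id);  // kEqual continuation
}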
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
@@ -2249,6 +2286,137 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitCreateFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmFloat32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmFloat32x4ReplaceLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4FromInt32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4FromUint32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Abs, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Neg, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Add, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Sub, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Eq, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Ne, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitCreateInt32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmInt32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmInt32x4ReplaceLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4FromFloat32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmUint32x4FromFloat32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Add, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Sub, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Eq, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Ne, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSimd32x4Select, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
+
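Every visitor above follows one template: the result is defined as a register, vector operands are forced into registers, and lane indices are passed via g.UseImmediate, since NEON encodes the lane in the instruction itself rather than reading it from a register. The compile-time nature of the lane can be sketched in C++ (illustrative, not V8 code):
// The lane index must be a constant known at instruction-selection time,
// which g.UseImmediate enforces; a template parameter models that here.
template <int kLane>
float ExtractLane(const float (&v)[4]) {
  static_assert(0 <= kLane && kLane < 4, "lane is an immediate in [0, 4)");
  return v[kLane];
}
// Usage: float y = ExtractLane<2>(vec);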
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 8b1cb578e0..09fe0eb718 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -209,17 +209,16 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
- if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(constant.rmode())) {
return Operand(constant.ToInt32(), constant.rmode());
} else {
return Operand(constant.ToInt32());
}
case Constant::kInt64:
- if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
return Operand(constant.ToInt64(), constant.rmode());
} else {
- DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode()));
return Operand(constant.ToInt64());
}
case Constant::kFloat32:
@@ -1702,6 +1701,65 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+ void Generate() final {
+ Arm64OperandConverter i(gen_, instr_);
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ // The trap code should never return.
+ __ Brk(0);
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ DCHECK(csp.Is(__ StackPointer()));
+ __ Move(cp, isolate()->native_context());
+ // Initialize the jssp because it is required for the runtime call.
+ __ Mov(jssp, csp);
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Condition cc = FlagsConditionToCondition(condition);
+ __ B(cc, tlabel);
+}
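AssembleArchTrap keeps the trap path out of line: the main instruction stream pays a single conditional branch, while the stub (frame setup when the frame was elided, then the runtime call, which never returns) is emitted after the function body. A schematic of that layout (illustrative stand-ins, not V8 code):
#include <cstdlib>
// Out-of-line stub: enter a WASM_COMPILED frame if needed, then call the
// trap runtime function; it never returns, and Brk(0) guards against
// falling through in debug builds.
[[noreturn]] void OutOfLineTrapStub(int trap_id) {
  (void)trap_id;  // the real stub selects the runtime function by id
  std::abort();
}
void HotPath(bool trap_condition) {
  if (trap_condition) OutOfLineTrapStub(0);  // __ B(cc, tlabel)
  // ... straight-line fast path continues, untouched by the trap code ...
}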
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 0eef53c6d5..9cb33f6c44 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -471,6 +471,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -708,6 +711,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1061,6 +1069,7 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
// OP is >>> or >> and (K & 0x1f) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
+ (mleft.right().Value() & 0x1f) != 0 &&
(mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
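The added `(mleft.right().Value() & 0x1f) != 0` check excludes the degenerate case the comment calls out: 32-bit shift counts are taken mod 32, so a shift amount whose low five bits are zero makes both shifts no-ops, the matched pattern reduces to x itself, and a full-width "extract" at bit 0 is not a real bitfield extract. A worked C++ example of that case (illustrative):
#include <cstdint>
// With K = 32, (K & 0x1f) == 0: both shifts are shift-by-zero, so
// (x << K) >>> K is just x and must not be lowered to Ubfx/Sbfx.
uint32_t DegenerateExtract(uint32_t x) {
  return (x << (32 & 0x1f)) >> (32 & 0x1f);  // == x
}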
@@ -1379,9 +1388,12 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), result, result,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1995,9 +2007,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2513,11 +2528,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
- } else {
- DCHECK(cont->IsDeoptimize());
+ } else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
cont->reason(), cont->frame_state());
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value),
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2541,6 +2560,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 1b7d1169dd..8c5dce61ee 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -17,7 +17,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -166,8 +166,6 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
void ReturnValue(Node* return_value);
void ThrowValue(Node* exception_value);
- class DeferredCommands;
-
protected:
enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
@@ -207,93 +205,6 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
int stack_height_;
};
-// Helper class for a try-finally control scope. It can record intercepted
-// control-flow commands that cause entry into a finally-block, and re-apply
-// them after again leaving that block. Special tokens are used to identify
-// paths going through the finally-block to dispatch after leaving the block.
-class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
- public:
- explicit DeferredCommands(AstGraphBuilder* owner)
- : owner_(owner),
- deferred_(owner->local_zone()),
- return_token_(nullptr),
- throw_token_(nullptr) {}
-
- // One recorded control-flow command.
- struct Entry {
- Command command; // The command type being applied on this path.
- Statement* statement; // The target statement for the command or {nullptr}.
- Node* token; // A token identifying this particular path.
- };
-
- // Records a control-flow command while entering the finally-block. This also
- // generates a new dispatch token that identifies one particular path.
- Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
- Node* token = nullptr;
- switch (cmd) {
- case CMD_BREAK:
- case CMD_CONTINUE:
- token = NewPathToken(dispenser_.GetBreakContinueToken());
- break;
- case CMD_THROW:
- if (throw_token_) return throw_token_;
- token = NewPathToken(TokenDispenserForFinally::kThrowToken);
- throw_token_ = token;
- break;
- case CMD_RETURN:
- if (return_token_) return return_token_;
- token = NewPathToken(TokenDispenserForFinally::kReturnToken);
- return_token_ = token;
- break;
- }
- DCHECK_NOT_NULL(token);
- deferred_.push_back({cmd, stmt, token});
- return token;
- }
-
- // Returns the dispatch token to be used to identify the implicit fall-through
- // path at the end of a try-block into the corresponding finally-block.
- Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
-
- // Applies all recorded control-flow commands after the finally-block again.
- // This generates a dynamic dispatch on the token from the entry point.
- void ApplyDeferredCommands(Node* token, Node* value) {
- SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
- dispatch.BeginSwitch();
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
- dispatch.BeginLabel(static_cast<int>(i), condition);
- dispatch.EndLabel();
- }
- for (size_t i = 0; i < deferred_.size(); ++i) {
- dispatch.BeginCase(static_cast<int>(i));
- owner_->execution_control()->PerformCommand(
- deferred_[i].command, deferred_[i].statement, value);
- dispatch.EndCase();
- }
- dispatch.EndSwitch();
- }
-
- protected:
- Node* NewPathToken(int token_id) {
- return owner_->jsgraph()->Constant(token_id);
- }
- Node* NewPathTokenForImplicitFallThrough() {
- return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
- }
- Node* NewPathDispatchCondition(Node* t1, Node* t2) {
- return owner_->NewNode(
- owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
- }
-
- private:
- TokenDispenserForFinally dispenser_;
- AstGraphBuilder* owner_;
- ZoneVector<Entry> deferred_;
- Node* return_token_;
- Node* throw_token_;
-};
-
// Control scope implementation for a BreakableStatement.
class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
@@ -356,65 +267,9 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
};
-// Control scope implementation for a TryCatchStatement.
-class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
- public:
- ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
- TryCatchBuilder* control)
- : ControlScope(owner), control_(control) {
- builder()->try_nesting_level_++; // Increment nesting.
- }
- ~ControlScopeForCatch() {
- builder()->try_nesting_level_--; // Decrement nesting.
- }
-
- protected:
- bool Execute(Command cmd, Statement* target, Node** value) override {
- switch (cmd) {
- case CMD_THROW:
- control_->Throw(*value);
- return true;
- case CMD_BREAK:
- case CMD_CONTINUE:
- case CMD_RETURN:
- break;
- }
- return false;
- }
-
- private:
- TryCatchBuilder* control_;
-};
-
-
-// Control scope implementation for a TryFinallyStatement.
-class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
- public:
- ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
- DeferredCommands* commands, TryFinallyBuilder* control)
- : ControlScope(owner), commands_(commands), control_(control) {
- builder()->try_nesting_level_++; // Increment nesting.
- }
- ~ControlScopeForFinally() {
- builder()->try_nesting_level_--; // Decrement nesting.
- }
-
- protected:
- bool Execute(Command cmd, Statement* target, Node** value) override {
- Node* token = commands_->RecordCommand(cmd, target, *value);
- control_->LeaveTry(token, *value);
- return true;
- }
-
- private:
- DeferredCommands* commands_;
- TryFinallyBuilder* control_;
-};
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, float invocation_frequency,
- LoopAssignmentAnalysis* loop,
- TypeHintAnalysis* type_hint_analysis)
+ LoopAssignmentAnalysis* loop)
: isolate_(info->isolate()),
local_zone_(local_zone),
info_(info),
@@ -425,12 +280,10 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
globals_(0, local_zone),
execution_control_(nullptr),
execution_context_(nullptr),
- try_nesting_level_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
- type_hint_analysis_(type_hint_analysis),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
false, local_zone),
@@ -453,7 +306,7 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
// calling eval, not the anonymous closure containing the eval code.
const Operator* op =
javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
- return NewNode(op, current_context());
+ return NewNode(op);
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -483,18 +336,6 @@ Node* AstGraphBuilder::GetFunctionContext() {
return function_context_.get();
}
-
-Node* AstGraphBuilder::GetNewTarget() {
- if (!new_target_.is_set()) {
- int params = info()->num_parameters_including_this();
- int index = Linkage::GetJSCallNewTargetParamIndex(params);
- const Operator* op = common()->Parameter(index, "%new.target");
- Node* node = NewNode(op, graph()->start());
- new_target_.set(node);
- }
- return new_target_.get();
-}
-
Node* AstGraphBuilder::GetEmptyFrameState() {
if (!empty_frame_state_.is_set()) {
const Operator* op = common()->FrameState(
@@ -573,15 +414,10 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
- // Build rest arguments array if it is used.
- Variable* rest_parameter = scope->rest_parameter();
- BuildRestArgumentsArray(rest_parameter);
-
- // Build assignment to {.this_function} variable if it is used.
- BuildThisFunctionVariable(scope->this_function_var());
-
- // Build assignment to {new.target} variable if it is used.
- BuildNewTargetVariable(scope->new_target_var());
+ // We don't support new.target and rest parameters here.
+ DCHECK_NULL(scope->new_target_var());
+ DCHECK_NULL(scope->rest_parameter());
+ DCHECK_NULL(scope->this_function_var());
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -835,7 +671,7 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
if (should_update) {
- const Operator* op = common()->StateValues(count);
+ const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
(*state_values) = graph()->NewNode(op, count, env_values);
}
}
@@ -1092,6 +928,7 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals()->push_back(variable->name());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
@@ -1109,17 +946,10 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
if (variable->binding_needs_init()) {
Node* value = jsgraph()->TheHoleConstant();
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, current_context(), value);
+ NewNode(op, value);
}
break;
- case VariableLocation::LOOKUP: {
- DCHECK(!variable->binding_needs_init());
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
- Node* store = NewNode(op, name);
- PrepareFrameState(store, decl->proxy()->id());
- break;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
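With the added push of variable->name(), each UNALLOCATED declaration appends a fixed-size group to globals(), which the DeclareGlobals runtime call later walks in order: the name, its feedback-vector slot, and an initial value pushed outside the hunks shown here. An illustrative (hypothetical, not V8 code) view of one group:
struct GlobalDeclGroup {
  const char* name;   // variable->name()
  int feedback_slot;  // Smi::FromInt(slot.ToInt())
  const void* value;  // e.g. undefined for a plain var, the compiled
                      // function for a FunctionDeclaration
};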
@@ -1134,6 +964,7 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
+ globals()->push_back(variable->name());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
@@ -1151,19 +982,10 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, current_context(), value);
- break;
- }
- case VariableLocation::LOOKUP: {
- VisitForValue(decl->fun());
- Node* value = environment()->Pop();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
- Node* store = NewNode(op, name, value);
- PrepareFrameState(store, decl->proxy()->id());
+ NewNode(op, value);
break;
}
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1240,14 +1062,8 @@ void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
- VisitForValue(stmt->expression());
- Node* value = environment()->Pop();
- Node* object = BuildToObject(value, stmt->ToObjectId());
- Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
- const Operator* op = javascript()->CreateWithContext(scope_info);
- Node* context = NewNode(op, object, GetFunctionClosureForContext());
- PrepareFrameState(context, stmt->EntryId());
- VisitInScope(stmt->statement(), stmt->scope(), context);
+ // Dynamic scoping is supported only by going through Ignition first.
+ UNREACHABLE();
}
@@ -1277,13 +1093,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
Node* label = environment()->Pop();
Node* tag = environment()->Top();
- CompareOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
- &hint)) {
- hint = CompareOperationHint::kAny;
- }
-
+ CompareOperationHint hint = CompareOperationHint::kAny;
const Operator* op = javascript()->StrictEqual(hint);
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -1450,114 +1260,20 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
- LoopBuilder for_loop(this);
- VisitForEffect(stmt->assign_iterator());
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitForEffect(stmt->next_result());
- VisitForTest(stmt->result_done());
- Node* condition = environment()->Pop();
- for_loop.BreakWhen(condition);
- VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
- for_loop.EndBody();
- for_loop.EndLoop();
+ // Iterator looping is supported only by going through Ignition first.
+ UNREACHABLE();
}
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TryCatchBuilder try_control(this);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting 'throw' control commands.
- try_control.BeginTry();
- {
- ControlScopeForCatch scope(this, stmt, &try_control);
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- environment()->Push(current_context());
- Visit(stmt->try_block());
- environment()->Pop();
- }
- try_control.EndTry();
-
- // If requested, clear message object as we enter the catch block.
- if (stmt->clear_pending_message()) {
- Node* the_hole = jsgraph()->TheHoleConstant();
- NewNode(javascript()->StoreMessage(), the_hole);
- }
-
- // Create a catch scope that binds the exception.
- Node* exception = try_control.GetExceptionNode();
- Handle<String> name = stmt->variable()->name();
- Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
- const Operator* op = javascript()->CreateCatchContext(name, scope_info);
- Node* context = NewNode(op, exception, GetFunctionClosureForContext());
-
- // Evaluate the catch-block.
- VisitInScope(stmt->catch_block(), stmt->scope(), context);
- try_control.EndCatch();
+ // Exception handling is supported only by going through Ignition first.
+ UNREACHABLE();
}
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TryFinallyBuilder try_control(this);
-
- // We keep a record of all paths that enter the finally-block to be able to
- // dispatch to the correct continuation point after the statements in the
- // finally-block have been evaluated.
- //
- // The try-finally construct can enter the finally-block in three ways:
- // 1. By exiting the try-block normally, falling through at the end.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (i.e. through break/continue/return statements).
- // 3. By exiting the try-block with a thrown exception.
- Node* fallthrough_result = jsgraph()->TheHoleConstant();
- ControlScope::DeferredCommands* commands =
- new (local_zone()) ControlScope::DeferredCommands(this);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting all control commands.
- try_control.BeginTry();
- {
- ControlScopeForFinally scope(this, stmt, commands, &try_control);
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- environment()->Push(current_context());
- Visit(stmt->try_block());
- environment()->Pop();
- }
- try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
-
- // The result value semantics depend on how the block was entered:
- // - ReturnStatement: It represents the return value being returned.
- // - ThrowStatement: It represents the exception being thrown.
- // - BreakStatement/ContinueStatement: Filled with the hole.
- // - Falling through into finally-block: Filled with the hole.
- Node* result = try_control.GetResultValueNode();
- Node* token = try_control.GetDispatchTokenNode();
-
- // The result value, dispatch token and message is expected on the operand
- // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
- Node* message = NewNode(javascript()->LoadMessage());
- environment()->Push(token);
- environment()->Push(result);
- environment()->Push(message);
-
- // Clear message object as we enter the finally block.
- Node* the_hole = jsgraph()->TheHoleConstant();
- NewNode(javascript()->StoreMessage(), the_hole);
-
- // Evaluate the finally-block.
- Visit(stmt->finally_block());
- try_control.EndFinally();
-
- // The result value, dispatch token and message is restored from the operand
- // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
- message = environment()->Pop();
- result = environment()->Pop();
- token = environment()->Pop();
- NewNode(javascript()->StoreMessage(), message);
-
- // Dynamic dispatch after the finally-block.
- commands->ApplyDeferredCommands(token, result);
+ // Exception handling is supported only by going through Ignition first.
+ UNREACHABLE();
}
@@ -1577,112 +1293,14 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// Create node to instantiate a new closure.
PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+ const Operator* op =
+ javascript()->CreateClosure(shared_info, pair, pretenure);
Node* value = NewNode(op);
ast_context()->ProduceValue(expr, value);
}
-
-void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- VisitForValueOrTheHole(expr->extends());
- VisitForValue(expr->constructor());
-
- // Create node to instantiate a new class.
- Node* constructor = environment()->Pop();
- Node* extends = environment()->Pop();
- Node* start = jsgraph()->Constant(expr->start_position());
- Node* end = jsgraph()->Constant(expr->end_position());
- const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
- Node* literal = NewNode(opc, extends, constructor, start, end);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
- environment()->Push(literal);
-
- // Load the "prototype" from the constructor.
- PrepareEagerCheckpoint(expr->CreateLiteralId());
- Handle<Name> name = isolate()->factory()->prototype_string();
- VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
- Node* prototype = BuildNamedLoad(literal, name, pair);
- PrepareFrameState(prototype, expr->PrototypeId(),
- OutputFrameStateCombine::Push());
- environment()->Push(prototype);
-
- // Create nodes to store method values into the literal.
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
- environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
-
- VisitForValue(property->key());
- Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
- environment()->Push(name);
-
- // The static prototype property is read only. We handle the non computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read only property we special case this so we do
- // not need to do this for every property.
- if (property->is_static() && property->is_computed_name()) {
- Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
- expr->GetIdForProperty(i));
- environment()->Push(check);
- }
-
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* key = environment()->Pop();
- Node* receiver = environment()->Pop();
-
- BuildSetHomeObject(value, receiver, property);
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- Node* set_function_name =
- jsgraph()->Constant(property->NeedsSetFunctionName());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- case ClassLiteral::Property::GETTER: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineGetterPropertyUnchecked, 4);
- NewNode(op, receiver, key, value, attr);
- break;
- }
- case ClassLiteral::Property::SETTER: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineSetterPropertyUnchecked, 4);
- NewNode(op, receiver, key, value, attr);
- break;
- }
- case ClassLiteral::Property::FIELD: {
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Set the constructor to have fast properties.
- prototype = environment()->Pop();
- literal = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
- literal = NewNode(op, literal);
-
- // Assign to class variable.
- if (expr->class_variable_proxy() != nullptr) {
- Variable* var = expr->class_variable_proxy()->var();
- VectorSlotPair feedback = CreateVectorSlotPair(
- expr->NeedsProxySlot() ? expr->ProxySlot()
- : FeedbackVectorSlot::Invalid());
- BuildVariableAssignment(var, literal, Token::INIT, feedback,
- BailoutId::None());
- }
- ast_context()->ProduceValue(expr, literal);
-}
-
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
UNREACHABLE();
@@ -1746,7 +1364,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralObject(
- expr->constant_properties(), expr->ComputeFlags(true),
+ expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
expr->literal_index(), expr->properties_count());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
@@ -1757,15 +1375,15 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
environment()->Push(literal);
// Create nodes to store computed values into the literal.
- int property_index = 0;
AccessorTable accessor_table(local_zone());
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1818,21 +1436,20 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
javascript()->CallRuntime(Runtime::kInternalSetPrototype);
Node* set_prototype = NewNode(op, receiver, value);
// SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype,
- expr->GetIdForPropertySet(property_index));
+ PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
break;
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1856,77 +1473,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* call = NewNode(op, literal, name, getter, setter, attr);
PrepareFrameState(call, it->second->bailout_id);
}
-
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
- // with the first computed property name and continues with all properties to
- // its right. All the code from above initializes the static component of the
- // object literal, and arranges for the map of the result to reflect the
- // static order in which the keys appear. For the dynamic properties, we
- // compile them into a series of "SetOwnProperty" runtime calls. This will
- // preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- environment()->Push(environment()->Top()); // Duplicate receiver.
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* receiver = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype);
- Node* call = NewNode(op, receiver, value);
- PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
- continue;
- }
-
- environment()->Push(environment()->Top()); // Duplicate receiver.
- VisitForValue(property->key());
- Node* name = BuildToName(environment()->Pop(),
- expr->GetIdForPropertyName(property_index));
- environment()->Push(name);
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* key = environment()->Pop();
- Node* receiver = environment()->Pop();
- BuildSetHomeObject(value, receiver, property);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::COMPUTED:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
- if (!property->emit_store()) continue;
- Node* attr = jsgraph()->Constant(NONE);
- Node* set_function_name =
- jsgraph()->Constant(property->NeedsSetFunctionName());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
- PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
- break;
- }
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE(); // Handled specially above.
- break;
- case ObjectLiteral::Property::GETTER: {
- Node* attr = jsgraph()->Constant(NONE);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineGetterPropertyUnchecked, 4);
- Node* call = NewNode(op, receiver, key, value, attr);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- Node* attr = jsgraph()->Constant(NONE);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineSetterPropertyUnchecked, 4);
- Node* call = NewNode(op, receiver, key, value, attr);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- }
- }
-
ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -1947,7 +1493,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralArray(
- expr->constant_elements(), expr->ComputeFlags(true),
+ expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
expr->literal_index(), expr->values()->length());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
@@ -2015,31 +1561,10 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
- case NAMED_SUPER_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
}
@@ -2071,13 +1596,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VisitForValue(property->key());
break;
case NAMED_SUPER_PROPERTY:
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- break;
case KEYED_SUPER_PROPERTY:
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
+ UNREACHABLE();
break;
}
@@ -2115,28 +1635,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Top();
- Node* receiver = environment()->Peek(1);
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Top();
- Node* home_object = environment()->Peek(1);
- Node* receiver = environment()->Peek(2);
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
environment()->Push(old_value);
VisitForValue(expr->value());
@@ -2181,22 +1683,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
ast_context()->ProduceValue(expr, value);
@@ -2205,8 +1695,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
void AstGraphBuilder::VisitYield(Yield* expr) {
// Generator functions are supported only by going through Ignition first.
- SetStackOverflow();
- ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
+ UNREACHABLE();
}
@@ -2243,27 +1732,10 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
- value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(expr->key());
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
ast_context()->ProduceValue(expr, value);
}
@@ -2272,140 +1744,70 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
void AstGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType();
+ CHECK(!expr->is_possibly_eval());
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
Node* receiver_value = nullptr;
Node* callee_value = nullptr;
- if (expr->is_possibly_eval()) {
- if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
- Variable* variable = callee->AsVariableProxy()->var();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
- Node* pair = NewNode(op, name);
- callee_value = NewNode(common()->Projection(0), pair);
- receiver_value = NewNode(common()->Projection(1), pair);
- PrepareFrameState(pair, expr->LookupId(),
- OutputFrameStateCombine::Push(2));
- } else {
- VisitForValue(callee);
- callee_value = environment()->Pop();
+ switch (call_type) {
+ case Call::GLOBAL_CALL: {
+ VariableProxy* proxy = callee->AsVariableProxy();
+ VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
+ break;
}
- } else {
- switch (call_type) {
- case Call::GLOBAL_CALL: {
- VariableProxy* proxy = callee->AsVariableProxy();
- VectorSlotPair pair =
- CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
- }
- case Call::WITH_CALL: {
- Variable* variable = callee->AsVariableProxy()->var();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
- Node* pair = NewNode(op, name);
- callee_value = NewNode(common()->Projection(0), pair);
- receiver_value = NewNode(common()->Projection(1), pair);
- PrepareFrameState(pair, expr->LookupId(),
- OutputFrameStateCombine::Push(2));
- break;
- }
- case Call::NAMED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* object = environment()->Top();
- callee_value = BuildNamedLoad(object, name, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Top();
- callee_value = BuildKeyedLoad(object, key, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::NAMED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- Node* home = environment()->Peek(1);
- Node* object = environment()->Top();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value =
- BuildNamedSuperLoad(object, home, name, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
- }
- case Call::KEYED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- environment()->Push(environment()->Top()); // Duplicate this_var.
- environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* home = environment()->Pop();
- Node* object = environment()->Pop();
- callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
- }
- case Call::SUPER_CALL:
- return VisitCallSuper(expr);
- case Call::OTHER_CALL:
- VisitForValue(callee);
- callee_value = environment()->Pop();
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
}
+ case Call::OTHER_CALL:
+ VisitForValue(callee);
+ callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ case Call::SUPER_CALL:
+ case Call::WITH_CALL:
+ UNREACHABLE();
}
// The callee and the receiver both have to be pushed onto the operand stack
@@ -2417,41 +1819,13 @@ void AstGraphBuilder::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Resolve callee for a potential direct eval call. This block will mutate the
- // callee value pushed onto the environment.
- if (expr->is_possibly_eval() && args->length() > 0) {
- int arg_count = args->length();
-
- // Extract callee and source string from the environment.
- Node* callee = environment()->Peek(arg_count + 1);
- Node* source = environment()->Peek(arg_count - 1);
-
- // Create node to ask for help resolving potential eval call. This will
- // provide a fully resolved callee to patch into the environment.
- Node* function = GetFunctionClosure();
- Node* language = jsgraph()->Constant(language_mode());
- Node* eval_scope_position =
- jsgraph()->Constant(current_scope()->start_position());
- Node* eval_position = jsgraph()->Constant(expr->position());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
- Node* new_callee = NewNode(op, callee, source, function, language,
- eval_scope_position, eval_position);
- PrepareFrameState(new_callee, expr->EvalId(),
- OutputFrameStateCombine::PokeAt(arg_count + 1));
-
- // Patch callee on the environment.
- environment()->Poke(arg_count + 1, new_callee);
- }
-
// Create node to perform the function call.
float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call =
javascript()->CallFunction(args->length() + 2, frequency, feedback,
receiver_hint, expr->tail_call_mode());
- PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
- : expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
// The callee passed to the call, we just need to push something here to
// satisfy the bailout location contract. The fullcodegen code will not
@@ -2463,34 +1837,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
}
-void AstGraphBuilder::VisitCallSuper(Call* expr) {
- SuperCallReference* super = expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super);
-
- // Prepare the callee to the super call.
- VisitForValue(super->this_function_var());
- Node* this_function = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
- Node* super_function = NewNode(op, this_function);
- environment()->Push(super_function);
-
- // Evaluate all arguments to the super call.
- ZoneList<Expression*>* args = expr->arguments();
- VisitForValues(args);
-
- // The new target is loaded from the {new.target} variable.
- VisitForValue(super->new_target_var());
-
- // Create node to perform the super call.
- const Operator* call =
- javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
- Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- ast_context()->ProduceValue(expr, value);
-}
-
-
void AstGraphBuilder::VisitCallNew(CallNew* expr) {
VisitForValue(expr->expression());
@@ -2625,35 +1971,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
stack_depth = 2;
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Top();
- Node* receiver = environment()->Peek(1);
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- stack_depth = 2;
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
- Node* key = environment()->Top();
- Node* home_object = environment()->Peek(1);
- Node* receiver = environment()->Peek(2);
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- stack_depth = 3;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
// Convert old value into a number.
@@ -2708,24 +2029,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
// Restore old value for postfix expressions.
@@ -2804,13 +2111,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
}
- CompareOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetCompareOperationHint(
- expr->CompareOperationFeedbackId(), &hint)) {
- hint = CompareOperationHint::kAny;
- }
-
+ CompareOperationHint hint = CompareOperationHint::kAny;
const Operator* op;
switch (expr->op()) {
case Token::EQ:
@@ -2868,6 +2169,10 @@ void AstGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void AstGraphBuilder::VisitGetIterator(GetIterator* expr) {
+ // GetIterator is supported only by going through Ignition first.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
@@ -2877,8 +2182,7 @@ void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
void AstGraphBuilder::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
- Node* value = BuildThrowUnsupportedSuperError(expr->id());
- ast_context()->ProduceValue(expr, value);
+ UNREACHABLE();
}
@@ -2905,10 +2209,10 @@ void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
int encoded_flags = info()->GetDeclareGlobalsFlags();
Node* flags = jsgraph()->Constant(encoded_flags);
- Node* pairs = jsgraph()->Constant(data);
+ Node* decls = jsgraph()->Constant(data);
Node* vector = jsgraph()->Constant(feedback_vector);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
- Node* call = NewNode(op, pairs, flags, vector);
+ Node* call = NewNode(op, decls, flags, vector);
PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
}
@@ -2920,12 +2224,6 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
}
-void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
- ContextScope scope(this, s, context);
- DCHECK(s->declarations()->is_empty());
- Visit(stmt);
-}
-
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop,
BailoutId stack_check_id) {
@@ -3074,46 +2372,6 @@ void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
-
-namespace {
-
-// Limit of context chain length to which inline check is possible.
-const int kMaxCheckDepth = 30;
-
-// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
-const uint32_t kFullCheckRequired = -1;
-
-} // namespace
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
- DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
- uint32_t check_depths = 0;
- for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (!s->calls_sloppy_eval()) continue;
- int depth = current_scope()->ContextChainLength(s);
- if (depth > kMaxCheckDepth) return kFullCheckRequired;
- check_depths |= 1 << depth;
- }
- return check_depths;
-}
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
- DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
- uint32_t check_depths = 0;
- for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
- int depth = current_scope()->ContextChainLength(s);
- if (depth > kMaxCheckDepth) return kFullCheckRequired;
- check_depths |= 1 << depth;
- if (s == variable->scope()) break;
- }
- return check_depths;
-}
-
float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
if (slot.IsInvalid()) return 0.0f;
Handle<TypeFeedbackVector> feedback_vector(
@@ -3147,7 +2405,8 @@ Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
Variable* variable = scope->receiver();
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, local_context, receiver);
+ Node* node = NewNode(op, receiver);
+ NodeProperties::ReplaceContextInput(node, local_context);
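+    // NewNode attached the current context as the implicit context input of
+    // the store above; ReplaceContextInput rewires it to the newly allocated
+    // local context instead.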
}
// Copy parameters into context if necessary.
@@ -3159,7 +2418,8 @@ Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
// Context variable (at bottom of the context chain).
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, local_context, parameter);
+ Node* node = NewNode(op, parameter);
+ NodeProperties::ReplaceContextInput(node, local_context);
}
return local_context;
@@ -3171,7 +2431,8 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
// Allocate a new local context.
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- const Operator* op = javascript()->CreateFunctionContext(slot_count);
+ const Operator* op =
+ javascript()->CreateFunctionContext(slot_count, scope->scope_type());
Node* local_context = NewNode(op, GetFunctionClosure());
return local_context;
@@ -3224,52 +2485,6 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
return object;
}
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
- if (rest == nullptr) return nullptr;
-
- // Allocate and initialize a new arguments object.
- CreateArgumentsType type = CreateArgumentsType::kRestParameter;
- const Operator* op = javascript()->CreateArguments(type);
- Node* object = NewNode(op, GetFunctionClosure());
- PrepareFrameState(object, BailoutId::None());
-
- // Assign the object to the {rest} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None());
- return object;
-}
-
-
-Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
- if (this_function_var == nullptr) return nullptr;
-
- // Retrieve the closure we were called with.
- Node* this_function = GetFunctionClosure();
-
- // Assign the object to the {.this_function} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- BuildVariableAssignment(this_function_var, this_function, Token::INIT,
- VectorSlotPair(), BailoutId::None());
- return this_function;
-}
-
-
-Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
- if (new_target_var == nullptr) return nullptr;
-
- // Retrieve the new target we were called with.
- Node* object = GetNewTarget();
-
- // Assign the object to the {new.target} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
- BailoutId::None());
- return object;
-}
-
-
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
Node* not_hole,
BailoutId bailout_id) {
@@ -3305,25 +2520,6 @@ Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
return environment()->Pop();
}
-
-Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
- BailoutId bailout_id) {
- IfBuilder prototype_check(this);
- Node* prototype_string =
- jsgraph()->Constant(isolate()->factory()->prototype_string());
- Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- name, prototype_string);
- prototype_check.If(check);
- prototype_check.Then();
- Node* error = BuildThrowStaticPrototypeError(bailout_id);
- environment()->Push(error);
- prototype_check.Else();
- environment()->Push(name);
- prototype_check.End();
- return environment()->Pop();
-}
-
-
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
const VectorSlotPair& feedback,
@@ -3363,7 +2559,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
info()->is_function_context_specializing();
const Operator* op =
javascript()->LoadContext(depth, variable->index(), immutable);
- Node* value = NewNode(op, current_context());
+ Node* value = NewNode(op);
// TODO(titzer): initialization checks are redundant for already
// initialized immutable context loads, but only specialization knows.
// Maybe specializer should be a parameter to the graph builder?
@@ -3373,17 +2569,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
return value;
}
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Handle<String> name = variable->name();
- if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
- feedback, combine, typeof_mode)) {
- return node;
- }
- Node* value = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(value, bailout_id, combine);
- return value;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3411,15 +2597,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
// Local var, const, or let variable or context variable.
return jsgraph()->BooleanConstant(variable->is_this());
}
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
- Node* result = NewNode(op, name);
- PrepareFrameState(result, bailout_id, combine);
- return result;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3498,7 +2676,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
@@ -3507,7 +2685,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (variable->is_this()) {
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT &&
@@ -3524,22 +2702,16 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (variable->binding_needs_init()) {
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
// Assignment to const is exception in all modes.
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
- return NewNode(op, current_context(), value);
- }
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Handle<Name> name = variable->name();
- Node* store = BuildDynamicStore(name, value);
- PrepareFrameState(store, bailout_id, combine);
- return store;
+ return NewNode(op, value);
}
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3551,7 +2723,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ Node* node = NewNode(op, object, key);
return node;
}
@@ -3559,7 +2731,7 @@ Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
+ Node* node = NewNode(op, object);
return node;
}
@@ -3567,7 +2739,7 @@ Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, key, value);
return node;
}
@@ -3577,49 +2749,7 @@ Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, value, GetFunctionClosure());
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
- Handle<Name> name,
- const VectorSlotPair& feedback) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
- Node* node = NewNode(op, receiver, home_object, name_node);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
- Node* key,
- const VectorSlotPair& feedback) {
- const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
- Node* node = NewNode(op, receiver, home_object, key);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
- Node* key, Node* value) {
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy;
- const Operator* op = javascript()->CallRuntime(function_id, 4);
- Node* node = NewNode(op, receiver, home_object, key, value);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value) {
- Node* name_node = jsgraph()->Constant(name);
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy;
- const Operator* op = javascript()->CallRuntime(function_id, 4);
- Node* node = NewNode(op, receiver, home_object, name_node, value);
+ Node* node = NewNode(op, object, value);
return node;
}
@@ -3628,7 +2758,7 @@ Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode) {
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, GetFunctionClosure());
+ Node* node = NewNode(op);
return node;
}
@@ -3637,33 +2767,10 @@ Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreGlobal(language_mode(), name, feedback);
- Node* node = NewNode(op, value, GetFunctionClosure());
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
- TypeofMode typeof_mode) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op =
- javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof);
- Node* node = NewNode(op, name_node);
+ Node* node = NewNode(op, value);
return node;
}
-
-Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op = javascript()->CallRuntime(
- is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- Node* node = NewNode(op, name_node, value);
- return node;
-}
-
-
Node* AstGraphBuilder::BuildLoadGlobalObject() {
return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
}
@@ -3672,30 +2779,20 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
const Operator* op =
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
- Node* native_context = NewNode(op, current_context());
- return NewNode(javascript()->LoadContext(0, index, true), native_context);
+ Node* native_context = NewNode(op);
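+  // The LoadContext below implicitly uses the current context; rewire it to
+  // read slot |index| from the native context loaded above.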
+ Node* result = NewNode(javascript()->LoadContext(0, index, true));
+ NodeProperties::ReplaceContextInput(result, native_context);
+ return result;
}
Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
if (Node* node = TryFastToBoolean(input)) return node;
- ToBooleanHints hints;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
- hints = ToBooleanHint::kAny;
- }
+ ToBooleanHints hints = ToBooleanHint::kAny;
return NewNode(javascript()->ToBoolean(hints), input);
}
-Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
- if (Node* node = TryFastToName(input)) return node;
- Node* name = NewNode(javascript()->ToName(), input);
- PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
- return name;
-}
-
-
Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
Node* object = NewNode(javascript()->ToObject(), input);
PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
@@ -3750,28 +2847,6 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
}
-Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
- Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
- UpdateControlDependencyToLeaveFunction(control);
- return call;
-}
-
-
-Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
- Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
- UpdateControlDependencyToLeaveFunction(control);
- return call;
-}
-
-
Node* AstGraphBuilder::BuildReturn(Node* return_value) {
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -3796,11 +2871,7 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
TypeFeedbackId feedback_id) {
const Operator* js_op;
- BinaryOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
- hint = BinaryOperationHint::kAny;
- }
+ BinaryOperationHint hint = BinaryOperationHint::kAny;
switch (op) {
case Token::BIT_OR:
js_op = javascript()->BitwiseOr(hint);
@@ -3850,109 +2921,6 @@ Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
return nullptr;
}
-Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
- Handle<String> name,
- BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
- TypeofMode typeof_mode) {
- VariableMode mode = variable->mode();
-
- if (mode == DYNAMIC_GLOBAL) {
- uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
- if (bitset == kFullCheckRequired) return nullptr;
-
- // We are using two blocks to model fast and slow cases.
- BlockBuilder fast_block(this);
- BlockBuilder slow_block(this);
- environment()->Push(jsgraph()->TheHoleConstant());
- slow_block.BeginBlock();
- environment()->Pop();
- fast_block.BeginBlock();
-
- // Perform checks whether the fast mode applies, by looking for any
- // extension object which might shadow the optimistic declaration.
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- current_context());
- Node* check =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
- jsgraph()->TheHoleConstant());
- fast_block.BreakUnless(check, BranchHint::kTrue);
- }
-
- // Fast case, because variable is not shadowed.
- if (Node* constant = TryLoadGlobalConstant(name)) {
- environment()->Push(constant);
- } else {
- // Perform global slot load.
- Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
- PrepareFrameState(fast, bailout_id, combine);
- environment()->Push(fast);
- }
- slow_block.Break();
- environment()->Pop();
- fast_block.EndBlock();
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- Node* slow = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(slow, bailout_id, combine);
- environment()->Push(slow);
- slow_block.EndBlock();
-
- return environment()->Pop();
- }
-
- if (mode == DYNAMIC_LOCAL) {
- uint32_t bitset = ComputeBitsetForDynamicContext(variable);
- if (bitset == kFullCheckRequired) return nullptr;
-
- // We are using two blocks to model fast and slow cases.
- BlockBuilder fast_block(this);
- BlockBuilder slow_block(this);
- environment()->Push(jsgraph()->TheHoleConstant());
- slow_block.BeginBlock();
- environment()->Pop();
- fast_block.BeginBlock();
-
- // Perform checks whether the fast mode applies, by looking for any
- // extension object which might shadow the optimistic declaration.
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- current_context());
- Node* check =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
- jsgraph()->TheHoleConstant());
- fast_block.BreakUnless(check, BranchHint::kTrue);
- }
-
- // Fast case, because variable is not shadowed. Perform context slot load.
- Variable* local = variable->local_if_not_shadowed();
- DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
- Node* fast =
- BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
- environment()->Push(fast);
- slow_block.Break();
- environment()->Pop();
- fast_block.EndBlock();
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- Node* slow = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(slow, bailout_id, combine);
- environment()->Push(slow);
- slow_block.EndBlock();
-
- return environment()->Pop();
- }
-
- return nullptr;
-}
-
-
Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
switch (input->opcode()) {
case IrOpcode::kNumberConstant: {
@@ -3983,24 +2951,6 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
}
-Node* AstGraphBuilder::TryFastToName(Node* input) {
- switch (input->opcode()) {
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = HeapObjectMatcher(input).Value();
- if (object->IsName()) return input;
- break;
- }
- case IrOpcode::kJSToString:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSTypeOf:
- return input;
- default:
- break;
- }
- return nullptr;
-}
-
-
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
DCHECK_EQ(-1, info()->osr_expr_stack_height());
@@ -4073,7 +3023,6 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (!has_context && !has_frame_state && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
- bool inside_try_scope = try_nesting_level_ > 0;
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
if (has_frame_state) ++input_count_with_deps;
@@ -4107,18 +3056,6 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (result->op()->EffectOutputCount() > 0) {
environment_->UpdateEffectDependency(result);
}
- // Add implicit exception continuation for throwing nodes.
- if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
- // Copy the environment for the success continuation.
- Environment* success_env = environment()->CopyForConditional();
- const Operator* op = common()->IfException();
- Node* effect = environment()->GetEffectDependency();
- Node* on_exception = graph()->NewNode(op, effect, result);
- environment_->UpdateControlDependency(on_exception);
- environment_->UpdateEffectDependency(on_exception);
- execution_control()->ThrowValue(on_exception);
- set_environment(success_env);
- }
// Add implicit success continuation for throwing nodes.
if (!result->op()->HasProperty(Operator::kNoThrow)) {
const Operator* op = common()->IfSuccess();
@@ -4244,8 +3181,7 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
Node* osr_context = effect = contexts()->back();
int last = static_cast<int>(contexts()->size() - 1);
for (int i = last - 1; i >= 0; i--) {
- osr_context = effect =
- graph->NewNode(load_op, osr_context, osr_context, effect);
+ osr_context = effect = graph->NewNode(load_op, osr_context, effect);
contexts()->at(i) = osr_context;
}
UpdateEffectDependency(effect);
@@ -4364,10 +3300,9 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
- TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
- int inlining_id)
+ SourcePositionTable* source_positions, int inlining_id)
: AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
- loop_assignment, type_hint_analysis),
+ loop_assignment),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position(), inlining_id) {}
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 2013f5053b..975e08094c 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -26,7 +26,6 @@ class Graph;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
-class TypeHintAnalysis;
// The AstGraphBuilder produces a high-level IR graph, based on an
@@ -39,8 +38,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
float invocation_frequency,
- LoopAssignmentAnalysis* loop_assignment = nullptr,
- TypeHintAnalysis* type_hint_analysis = nullptr);
+ LoopAssignmentAnalysis* loop_assignment = nullptr);
virtual ~AstGraphBuilder() {}
// Creates a graph by visiting the entire AST.
@@ -73,8 +71,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
class ControlScope;
class ControlScopeForBreakable;
class ControlScopeForIteration;
- class ControlScopeForCatch;
- class ControlScopeForFinally;
class Environment;
friend class ControlBuilder;
@@ -98,10 +94,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Nodes representing values in the activation record.
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
- SetOncePointer<Node> new_target_;
-
- // Tracks how many try-blocks are currently entered.
- int try_nesting_level_;
// Temporary storage for building node input lists.
int input_buffer_size_;
@@ -119,9 +111,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
- // Result of type hint analysis performed before graph creation.
- TypeHintAnalysis* type_hint_analysis_;
-
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
@@ -171,9 +160,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Get or create the node that represents the incoming function context.
Node* GetFunctionContext();
- // Get or create the node that represents the incoming new target value.
- Node* GetNewTarget();
-
// Get or create the node that represents the empty frame state.
Node* GetEmptyFrameState();
@@ -262,11 +248,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Named and keyed loads require a VectorSlotPair for successful lowering.
VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
- // Determine which contexts need to be checked for extension objects that
- // might shadow the optimistic declaration of dynamic lookup variables.
- uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
- uint32_t ComputeBitsetForDynamicContext(Variable* variable);
-
// Computes the frequency for JSCallFunction and JSCallConstruct nodes.
float ComputeCallFrequency(FeedbackVectorSlot slot) const;
@@ -284,15 +265,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
- // Builder to create an array of rest parameters if used.
- Node* BuildRestArgumentsArray(Variable* rest);
-
- // Builder that assigns to the {.this_function} internal variable if needed.
- Node* BuildThisFunctionVariable(Variable* this_function_var);
-
- // Builder that assigns to the {new.target} internal variable if needed.
- Node* BuildNewTargetVariable(Variable* new_target_var);
-
// Builders for variable load and assignment.
Node* BuildVariableAssignment(Variable* variable, Node* value,
Token::Value op, const VectorSlotPair& slot,
@@ -316,33 +288,18 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
- // Builders for super property loads and stores.
- Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
- Node* value);
- Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value);
- Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
- Handle<Name> name, const VectorSlotPair& feedback);
- Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
- const VectorSlotPair& feedback);
-
// Builders for global variable loads and stores.
Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
TypeofMode typeof_mode);
Node* BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
- // Builders for dynamic variable loads and stores.
- Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
- Node* BuildDynamicStore(Handle<Name> name, Node* value);
-
// Builders for accessing the function context.
Node* BuildLoadGlobalObject();
Node* BuildLoadNativeContextField(int index);
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
- Node* BuildToName(Node* input, BailoutId bailout_id);
Node* BuildToObject(Node* input, BailoutId bailout_id);
// Builder for adding the [[HomeObject]] to a value if the value came from a
@@ -354,8 +311,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildThrowError(Node* exception, BailoutId bailout_id);
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
Node* BuildThrowConstAssignError(BailoutId bailout_id);
- Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
- Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
@@ -363,9 +318,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
BailoutId bailout_id);
- // Builders for conditional errors.
- Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
-
// Builders for non-local control flow.
Node* BuildReturn(Node* return_value);
Node* BuildThrow(Node* exception_value);
@@ -387,17 +339,8 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Optimization for variable load from global object.
Node* TryLoadGlobalConstant(Handle<Name> name);
- // Optimization for variable load of dynamic lookup slot that is most likely
- // to resolve to a global slot or context slot (inferred from scope chain).
- Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
- BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
- TypeofMode typeof_mode);
-
// Optimizations for automatic type conversion.
Node* TryFastToBoolean(Node* input);
- Node* TryFastToName(Node* input);
// ===========================================================================
// The following visitation methods all recursively visit a subtree of the
@@ -408,7 +351,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Visit statements.
void VisitIfNotNull(Statement* stmt);
- void VisitInScope(Statement* stmt, Scope* scope, Node* context);
// Visit expressions.
void Visit(Expression* expr);
@@ -622,7 +564,6 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, float invocation_frequency,
LoopAssignmentAnalysis* loop_assignment,
- TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 82eaeb28a4..8239e3a058 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -201,6 +202,7 @@ void ALAA::VisitSpread(Spread* e) { UNREACHABLE(); }
void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
+void ALAA::VisitGetIterator(GetIterator* e) { UNREACHABLE(); }
void ALAA::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 9b36eb1068..0b7ad19af7 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -18,7 +18,9 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
jsgraph_(js_graph),
node_conditions_(zone, js_graph->graph()->NodeCount()),
zone_(zone),
- dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+ dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
BranchElimination::~BranchElimination() {}
@@ -143,20 +145,27 @@ Reduction BranchElimination::ReduceLoop(Node* node) {
Reduction BranchElimination::ReduceMerge(Node* node) {
// Shortcut for the case when we do not know anything about some
// input.
- for (int i = 0; i < node->InputCount(); i++) {
- if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+ Node::Inputs inputs = node->inputs();
+ for (Node* input : inputs) {
+ if (node_conditions_.Get(input) == nullptr) {
return UpdateConditions(node, nullptr);
}
}
- const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+ auto input_it = inputs.begin();
+
+ DCHECK_GT(inputs.count(), 0);
+
+ const ControlPathConditions* first = node_conditions_.Get(*input_it);
+ ++input_it;
// Make a copy of the first input's conditions and merge with the conditions
// from other inputs.
ControlPathConditions* conditions =
new (zone_->New(sizeof(ControlPathConditions)))
ControlPathConditions(*first);
- for (int i = 1; i < node->InputCount(); i++) {
- conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+ auto input_end = inputs.end();
+ for (; input_it != input_end; ++input_it) {
+ conditions->Merge(*(node_conditions_.Get(*input_it)));
}
return UpdateConditions(node, conditions);
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
new file mode 100644
index 0000000000..f0e870739b
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -0,0 +1,622 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using namespace interpreter;
+
+BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
+ int register_count, Zone* zone)
+ : parameter_count_(parameter_count),
+ bit_vector_(new (zone)
+ BitVector(parameter_count + register_count, zone)) {}
+
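+// The assignment bit vector is laid out with all parameters first, then the
+// local registers: parameter i maps to bit i, and local register j maps to
+// bit parameter_count_ + j. The helpers below encode registers accordingly.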
+void BytecodeLoopAssignments::Add(interpreter::Register r) {
+ if (r.is_parameter()) {
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ } else {
+ bit_vector_->Add(parameter_count_ + r.index());
+ }
+}
+
+void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+ if (r.is_parameter()) {
+ DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+ } else {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index());
+ bit_vector_->Add(parameter_count_ + r.index() + 1);
+ }
+}
+
+void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
+ if (r.is_parameter()) {
+ DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(interpreter::Register(r.index() + 2).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+ } else {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index());
+ bit_vector_->Add(parameter_count_ + r.index() + 1);
+ bit_vector_->Add(parameter_count_ + r.index() + 2);
+ }
+}
+
+void BytecodeLoopAssignments::AddAll() { bit_vector_->AddAll(); }
+
+void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) {
+ bit_vector_->Union(*other.bit_vector_);
+}
+
+bool BytecodeLoopAssignments::ContainsParameter(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, parameter_count());
+ return bit_vector_->Contains(index);
+}
+
+bool BytecodeLoopAssignments::ContainsLocal(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, local_count());
+ return bit_vector_->Contains(parameter_count_ + index);
+}
+
+bool BytecodeLoopAssignments::ContainsAccumulator() const {
+ // TODO(leszeks): This assumes the accumulator is always assigned. This is
+ // probably correct, but that assignment is also probably dead, so we should
+ // check liveness.
+ return true;
+}
+
+BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
+ Zone* zone, bool do_liveness_analysis)
+ : bytecode_array_(bytecode_array),
+ do_liveness_analysis_(do_liveness_analysis),
+ zone_(zone),
+ loop_stack_(zone),
+ loop_end_index_queue_(zone),
+ end_to_header_(zone),
+ header_to_info_(zone),
+ liveness_map_(bytecode_array->length(), zone) {}
+
+namespace {
+
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
+ const BytecodeArrayAccessor& accessor) {
+ int num_operands = Bytecodes::NumberOfOperands(bytecode);
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ AccumulatorUse accumulator_use = Bytecodes::GetAccumulatorUse(bytecode);
+
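+  // |in_liveness| was seeded with this bytecode's out-liveness by the caller.
+  // Walking backwards, first clear the registers this bytecode writes (the
+  // kill set), then mark the registers it reads (the gen set), giving the
+  // standard backwards dataflow update in = gen ∪ (out - kill).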
+ if (accumulator_use == AccumulatorUse::kWrite) {
+ in_liveness.MarkAccumulatorDead();
+ }
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kRegOut: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness.MarkRegisterDead(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegOutPair: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ in_liveness.MarkRegisterDead(r.index());
+ in_liveness.MarkRegisterDead(r.index() + 1);
+ }
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+ in_liveness.MarkRegisterDead(r.index());
+ in_liveness.MarkRegisterDead(r.index() + 1);
+ in_liveness.MarkRegisterDead(r.index() + 2);
+ }
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+ break;
+ }
+ }
+
+ if (accumulator_use == AccumulatorUse::kRead) {
+ in_liveness.MarkAccumulatorLive();
+ }
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kReg: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness.MarkRegisterLive(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegPair: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ in_liveness.MarkRegisterLive(r.index());
+ in_liveness.MarkRegisterLive(r.index() + 1);
+ }
+ break;
+ }
+      case OperandType::kRegList: {
+        interpreter::Register r = accessor.GetRegisterOperand(i++);
+        uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+        if (!r.is_parameter()) {
+          for (uint32_t j = 0; j < reg_count; ++j) {
+            DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+            in_liveness.MarkRegisterLive(r.index() + j);
+          }
+        }
+        break;
+      }
+ default:
+ DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
+ break;
+ }
+ }
+}
+
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const BytecodeArrayAccessor& accessor,
+ const BytecodeLivenessMap& liveness_map) {
+ int current_offset = accessor.current_offset();
+ const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+
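+  // The out-liveness of a bytecode is the union of the in-liveness of its
+  // possible successors: the forward jump target and/or the next bytecode,
+  // plus the matching exception handler for bytecodes that may throw.
+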
+  // Update from jump target (if any). Skip loops; we update these manually in
+ // the liveness iterations.
+ if (Bytecodes::IsForwardJump(bytecode)) {
+ int target_offset = accessor.GetJumpTargetOffset();
+ out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+ }
+
+ // Update from next bytecode (unless there isn't one or this is an
+ // unconditional jump).
+ if (next_bytecode_in_liveness != nullptr &&
+ !Bytecodes::IsUnconditionalJump(bytecode)) {
+ out_liveness.Union(*next_bytecode_in_liveness);
+ }
+
+ // Update from exception handler (if any).
+ if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+ int handler_context;
+ // TODO(leszeks): We should look up this range only once per entry.
+ HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+ int handler_offset =
+ table->LookupRange(current_offset, &handler_context, nullptr);
+
+ if (handler_offset != -1) {
+ out_liveness.Union(*liveness_map.GetInLiveness(handler_offset));
+ out_liveness.MarkRegisterLive(handler_context);
+ }
+ }
+}
+
+void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
+ const BytecodeArrayAccessor& accessor) {
+ int num_operands = Bytecodes::NumberOfOperands(bytecode);
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kRegOut: {
+ assignments.Add(accessor.GetRegisterOperand(i));
+ break;
+ }
+ case OperandType::kRegOutPair: {
+ assignments.AddPair(accessor.GetRegisterOperand(i));
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ assignments.AddTriple(accessor.GetRegisterOperand(i));
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+ break;
+ }
+ }
+}
+
+} // namespace
+
+void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
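+  // Seed the loop stack with a sentinel entry so that loop_stack_.top() is
+  // always valid; real loops sit above it (hence the size() > 1 checks below).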
+ loop_stack_.push({-1, nullptr});
+
+ BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+ int osr_loop_end_offset =
+ osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+
+ BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+
+ if (bytecode == Bytecode::kJumpLoop) {
+ // Every byte up to and including the last byte within the backwards jump
+      // instruction is considered part of the loop; set the loop end accordingly.
+ int loop_end = current_offset + iterator.current_bytecode_size();
+ PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+
+ // Normally prefixed bytecodes are treated as if the prefix's offset was
+ // the actual bytecode's offset. However, the OSR id is the offset of the
+ // actual JumpLoop bytecode, so we need to find the location of that
+ // bytecode ignoring the prefix.
+ int jump_loop_offset = current_offset + iterator.current_prefix_offset();
+ bool is_osr_loop = (jump_loop_offset == osr_loop_end_offset);
+
+ // Check that is_osr_loop is set iff the osr_loop_end_offset is within
+ // this bytecode.
+ DCHECK(!is_osr_loop ||
+ iterator.OffsetWithinBytecode(osr_loop_end_offset));
+
+ // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
+      // need to make sure to consider everything to be assigned.
+ if (is_osr_loop) {
+ loop_stack_.top().loop_info->assignments().AddAll();
+ }
+
+ // Save the index so that we can do another pass later.
+ if (do_liveness_analysis_) {
+ loop_end_index_queue_.push_back(iterator.current_index());
+ }
+ } else if (loop_stack_.size() > 1) {
+ LoopStackEntry& current_loop = loop_stack_.top();
+ LoopInfo* current_loop_info = current_loop.loop_info;
+
+ // TODO(leszeks): Ideally, we'd only set values that were assigned in
+ // the loop *and* are live when the loop exits. However, this requires
+ // tracking the out-liveness of *all* loop exits, which is not
+ // information we currently have.
+ UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+
+ if (current_offset == current_loop.header_offset) {
+ loop_stack_.pop();
+ if (loop_stack_.size() > 1) {
+ // Propagate inner loop assignments to outer loop.
+ loop_stack_.top().loop_info->assignments().Union(
+ current_loop_info->assignments());
+ }
+ }
+ }
+
+ if (do_liveness_analysis_) {
+ BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
+ current_offset, bytecode_array()->register_count(), zone());
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+ }
+
+ DCHECK_EQ(loop_stack_.size(), 1u);
+ DCHECK_EQ(loop_stack_.top().header_offset, -1);
+
+ if (!do_liveness_analysis_) return;
+
+  // At this point, every bytecode has a valid in- and out-liveness, except
+  // that liveness has not yet been propagated across back edges (i.e.
+  // JumpLoop). Subsequent liveness analysis iterations can only add liveness
+  // bits that are pulled across these back edges.
+ //
+  // Furthermore, a loop header's in-liveness can only change based on
+  // bytecodes *after* the loop end -- it cannot change as a result of the
+  // JumpLoop liveness being updated, as the only liveness bits that can be
+  // added to the loop body are those of the loop header.
+ //
+ // So, if we know that the liveness of bytecodes after a loop header won't
+ // change (e.g. because there are no loops in them, or we have already ensured
+ // those loops are valid), we can safely update the loop end and pass over the
+ // loop body, and then never have to pass over that loop end again, because we
+ // have shown that its target, the loop header, can't change from the entries
+ // after the loop, and can't change from any loop body pass.
+ //
+ // This means that in a pass, we can iterate backwards over the bytecode
+ // array, process any loops that we encounter, and on subsequent passes we can
+ // skip processing those loops (though we still have to process inner loops).
+ //
+ // Equivalently, we can queue up loop ends from back to front, and pass over
+ // the loops in that order, as this preserves both the bottom-to-top and
+ // outer-to-inner requirements.
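+  //
+  // As a purely hypothetical illustration, given two nested loops
+  //
+  //   8  : loop header (outer)
+  //   16 :   loop header (inner)
+  //   24 :   JumpLoop -> 16
+  //   32 : JumpLoop -> 8
+  //
+  // the backwards walk above encounters the JumpLoop at 32 before the one at
+  // 24, so the queue holds [32, 24] and the fixpoint pass below processes the
+  // outer loop before the inner one.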
+
+ for (int loop_end_index : loop_end_index_queue_) {
+ iterator.GoToIndex(loop_end_index);
+
+ DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+
+ int header_offset = iterator.GetJumpTargetOffset();
+ int end_offset = iterator.current_offset();
+
+ BytecodeLiveness& header_liveness =
+ liveness_map_.GetLiveness(header_offset);
+ BytecodeLiveness& end_liveness = liveness_map_.GetLiveness(end_offset);
+
+ if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) {
+ // Only update the loop body if the loop end liveness changed.
+ continue;
+ }
+ end_liveness.in->CopyFrom(*end_liveness.out);
+ next_bytecode_in_liveness = end_liveness.in;
+
+ // Advance into the loop body.
+ --iterator;
+ for (; iterator.current_offset() > header_offset; --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+    // Now we are at the loop header. Since the in-liveness of the header
+    // can't change, we only need to update the out-liveness.
+ UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
+ next_bytecode_in_liveness, iterator, liveness_map_);
+ }
+
+ DCHECK(LivenessIsValid());
+}
+
+void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
+ DCHECK(loop_header < loop_end);
+ DCHECK(loop_stack_.top().header_offset < loop_header);
+ DCHECK(end_to_header_.find(loop_end) == end_to_header_.end());
+ DCHECK(header_to_info_.find(loop_header) == header_to_info_.end());
+
+ int parent_offset = loop_stack_.top().header_offset;
+
+ end_to_header_.insert({loop_end, loop_header});
+ auto it = header_to_info_.insert(
+ {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(),
+ bytecode_array_->register_count(), zone_)});
+ // Get the loop info pointer from the output of insert.
+ LoopInfo* loop_info = &it.first->second;
+
+ loop_stack_.push({loop_header, loop_info});
+}
+
+bool BytecodeAnalysis::IsLoopHeader(int offset) const {
+ return header_to_info_.find(offset) != header_to_info_.end();
+}
+
+int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
+ auto loop_end_to_header = end_to_header_.upper_bound(offset);
+  // If there is no next loop end, then {offset} is not in a loop.
+ if (loop_end_to_header == end_to_header_.end()) {
+ return -1;
+ }
+  // If the header precedes the offset, this is the loop
+ //
+ // .> header <--loop_end_to_header
+ // |
+ // | <--offset
+ // |
+ // `- end
+ if (loop_end_to_header->second <= offset) {
+ return loop_end_to_header->second;
+ }
+ // Otherwise there is a (potentially nested) loop after this offset.
+ //
+ // <--offset
+ //
+ // .> header
+ // |
+ // | .> header <--loop_end_to_header
+ // | |
+ // | `- end
+ // |
+ // `- end
+ // We just return the parent of the next loop (might be -1).
+ DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end());
+
+ return header_to_info_.upper_bound(offset)->second.parent_offset();
+}
+
+const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
+ DCHECK(IsLoopHeader(header_offset));
+
+ return header_to_info_.find(header_offset)->second;
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
+ int offset) const {
+ if (!do_liveness_analysis_) return nullptr;
+
+ return liveness_map_.GetInLiveness(offset);
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
+ int offset) const {
+ if (!do_liveness_analysis_) return nullptr;
+
+ return liveness_map_.GetOutLiveness(offset);
+}
+
+std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+
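+  // Each line has the form "<in> -> <out> | <offset>: <bytecode>", where the
+  // i-th character of <in> and <out> is 'L' if register i is live and '.'
+  // otherwise, e.g. "L.L. -> .L.. | 24: Star r1".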
+ for (; !iterator.done(); iterator.Advance()) {
+ int current_offset = iterator.current_offset();
+
+ const BitVector& in_liveness =
+ GetInLivenessFor(current_offset)->bit_vector();
+ const BitVector& out_liveness =
+ GetOutLivenessFor(current_offset)->bit_vector();
+
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ os << (in_liveness.Contains(i) ? "L" : ".");
+ }
+ os << " -> ";
+
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ os << (out_liveness.Contains(i) ? "L" : ".");
+ }
+
+ os << " | " << current_offset << ": ";
+ iterator.PrintTo(os) << std::endl;
+ }
+
+ return os;
+}
+
+#if DEBUG
+bool BytecodeAnalysis::LivenessIsValid() {
+ BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+
+ BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
+ zone());
+
+ int invalid_offset = -1;
+ int which_invalid = -1;
+
+ BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+ // Ensure that there are no liveness changes if we iterate one more time.
+ for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+
+ int current_offset = iterator.current_offset();
+
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ previous_liveness.CopyFrom(*liveness.out);
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ // UpdateOutLiveness skips kJumpLoop, so we update it manually.
+ if (bytecode == Bytecode::kJumpLoop) {
+ int target_offset = iterator.GetJumpTargetOffset();
+ liveness.out->Union(*liveness_map_.GetInLiveness(target_offset));
+ }
+
+ if (!liveness.out->Equals(previous_liveness)) {
+ // Reset the invalid liveness.
+ liveness.out->CopyFrom(previous_liveness);
+ invalid_offset = current_offset;
+ which_invalid = 1;
+ break;
+ }
+
+ previous_liveness.CopyFrom(*liveness.in);
+
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ if (!liveness.in->Equals(previous_liveness)) {
+ // Reset the invalid liveness.
+ liveness.in->CopyFrom(previous_liveness);
+ invalid_offset = current_offset;
+ which_invalid = 0;
+ break;
+ }
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+
+ if (invalid_offset != -1) {
+ OFStream of(stderr);
+ of << "Invalid liveness:" << std::endl;
+
+ // Dump the bytecode, annotated with the liveness and marking loops.
+
+ int loop_indent = 0;
+
+ BytecodeArrayIterator forward_iterator(bytecode_array());
+ for (; !forward_iterator.done(); forward_iterator.Advance()) {
+ int current_offset = forward_iterator.current_offset();
+ const BitVector& in_liveness =
+ GetInLivenessFor(current_offset)->bit_vector();
+ const BitVector& out_liveness =
+ GetOutLivenessFor(current_offset)->bit_vector();
+
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ of << (in_liveness.Contains(i) ? 'L' : '.');
+ }
+
+ of << " | ";
+
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ of << (out_liveness.Contains(i) ? 'L' : '.');
+ }
+
+ of << " : " << current_offset << " : ";
+
+      // Draw loop back edges by indenting everything between loop headers
+      // and jump loop instructions.
+ if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+ loop_indent--;
+ }
+ for (int i = 0; i < loop_indent; ++i) {
+ of << " | ";
+ }
+ if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+ of << " `-" << current_offset;
+ } else if (IsLoopHeader(current_offset)) {
+ of << " .>" << current_offset;
+ loop_indent++;
+ }
+ forward_iterator.PrintTo(of) << std::endl;
+
+ if (current_offset == invalid_offset) {
+ // Underline the invalid liveness.
+ if (which_invalid == 0) {
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ of << '^';
+ }
+ } else {
+ for (int i = 0; i < in_liveness.length() + 3; ++i) {
+ of << ' ';
+ }
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ of << '^';
+ }
+ }
+
+ // Make sure to draw the loop indentation marks on this additional line.
+ of << " : " << current_offset << " : ";
+ for (int i = 0; i < loop_indent; ++i) {
+ of << " | ";
+ }
+
+ of << std::endl;
+ }
+ }
+ }
+
+ return invalid_offset == -1;
+}
+#endif
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
new file mode 100644
index 0000000000..ad93f8a652
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_ANALYSIS_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
+ public:
+ BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
+
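+  // Marks the given register, or the pair/triple of consecutive registers
+  // starting at it, as assigned inside the loop.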
+ void Add(interpreter::Register r);
+ void AddPair(interpreter::Register r);
+ void AddTriple(interpreter::Register r);
+ void AddAll();
+ void Union(const BytecodeLoopAssignments& other);
+
+ bool ContainsParameter(int index) const;
+ bool ContainsLocal(int index) const;
+ bool ContainsAccumulator() const;
+
+ int parameter_count() const { return parameter_count_; }
+ int local_count() const { return bit_vector_->length() - parameter_count_; }
+
+ private:
+ int parameter_count_;
+ BitVector* bit_vector_;
+};
+
+struct V8_EXPORT_PRIVATE LoopInfo {
+ public:
+ LoopInfo(int parent_offset, int parameter_count, int register_count,
+ Zone* zone)
+ : parent_offset_(parent_offset),
+ assignments_(parameter_count, register_count, zone) {}
+
+ int parent_offset() const { return parent_offset_; }
+
+ BytecodeLoopAssignments& assignments() { return assignments_; }
+ const BytecodeLoopAssignments& assignments() const { return assignments_; }
+
+ private:
+ // The offset to the parent loop, or -1 if there is no parent.
+ int parent_offset_;
+ BytecodeLoopAssignments assignments_;
+};
+
+class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
+ bool do_liveness_analysis);
+
+ // Analyze the bytecodes to find the loop ranges, loop nesting, loop
+ // assignments and liveness, under the assumption that there is an OSR bailout
+ // at {osr_bailout_id}.
+ //
+ // No other methods in this class return valid information until this has been
+ // called.
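+  //
+  // A minimal usage sketch (assuming a BytecodeArray handle {bytecode} and a
+  // Zone* {zone}, with no OSR entry point):
+  //
+  //   BytecodeAnalysis analysis(bytecode, zone, true);
+  //   analysis.Analyze(BailoutId::None());
+  //   if (analysis.IsLoopHeader(offset)) { ... }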
+ void Analyze(BailoutId osr_bailout_id);
+
+  // Returns true if the given offset is a loop header.
+ bool IsLoopHeader(int offset) const;
+  // Gets the loop header offset of the innermost loop containing {offset},
+  // or -1 if {offset} is not inside any loop.
+ int GetLoopOffsetFor(int offset) const;
+  // Gets the loop info of the loop header at {header_offset}.
+ const LoopInfo& GetLoopInfoFor(int header_offset) const;
+
+ // Gets the in-liveness for the bytecode at {offset}.
+ const BytecodeLivenessState* GetInLivenessFor(int offset) const;
+
+ // Gets the out-liveness for the bytecode at {offset}.
+ const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+
+ std::ostream& PrintLivenessTo(std::ostream& os) const;
+
+ private:
+ struct LoopStackEntry {
+ int header_offset;
+ LoopInfo* loop_info;
+ };
+
+ void PushLoop(int loop_header, int loop_end);
+
+#if DEBUG
+ bool LivenessIsValid();
+#endif
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ Handle<BytecodeArray> bytecode_array_;
+ bool do_liveness_analysis_;
+ Zone* zone_;
+
+ ZoneStack<LoopStackEntry> loop_stack_;
+ ZoneVector<int> loop_end_index_queue_;
+
+ ZoneMap<int, int> end_to_header_;
+ ZoneMap<int, LoopInfo> header_to_info_;
+
+ BytecodeLivenessMap liveness_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.cc b/deps/v8/src/compiler/bytecode-branch-analysis.cc
deleted file mode 100644
index 4e96a53aeb..0000000000
--- a/deps/v8/src/compiler/bytecode-branch-analysis.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-branch-analysis.h"
-
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeBranchAnalysis::BytecodeBranchAnalysis(
- Handle<BytecodeArray> bytecode_array, Zone* zone)
- : bytecode_array_(bytecode_array),
- is_backward_target_(bytecode_array->length(), zone),
- is_forward_target_(bytecode_array->length(), zone),
- zone_(zone) {}
-
-void BytecodeBranchAnalysis::Analyze() {
- interpreter::BytecodeArrayIterator iterator(bytecode_array());
- while (!iterator.done()) {
- interpreter::Bytecode bytecode = iterator.current_bytecode();
- int current_offset = iterator.current_offset();
- if (interpreter::Bytecodes::IsJump(bytecode)) {
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- }
- iterator.Advance();
- }
-}
-
-void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
- if (source_offset < target_offset) {
- is_forward_target_.Add(target_offset);
- } else {
- is_backward_target_.Add(target_offset);
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.h b/deps/v8/src/compiler/bytecode-branch-analysis.h
deleted file mode 100644
index 7d32da8281..0000000000
--- a/deps/v8/src/compiler/bytecode-branch-analysis.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-
-#include "src/bit-vector.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-// A class for identifying branch targets within a bytecode array.
-// This information can be used to construct the local control flow
-// logic for high-level IR graphs built from bytecode.
-//
-// N.B. If this class is used to determine loop headers, then such a
-// usage relies on the only backwards branches in bytecode being jumps
-// back to loop headers.
-class BytecodeBranchAnalysis BASE_EMBEDDED {
- public:
- BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
-
- // Analyze the bytecodes to find the branch sites and their
- // targets. No other methods in this class return valid information
- // until this has been called.
- void Analyze();
-
- // Returns true if there are any forward branches to the bytecode at
- // |offset|.
- bool forward_branches_target(int offset) const {
- return is_forward_target_.Contains(offset);
- }
-
- // Returns true if there are any backward branches to the bytecode
- // at |offset|.
- bool backward_branches_target(int offset) const {
- return is_backward_target_.Contains(offset);
- }
-
- private:
- void AddBranch(int origin_offset, int target_offset);
-
- Zone* zone() const { return zone_; }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- Handle<BytecodeArray> bytecode_array_;
- BitVector is_backward_target_;
- BitVector is_forward_target_;
- Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 34b50df308..d22746d9ec 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -7,10 +7,10 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
-#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
@@ -36,7 +36,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
- void MarkAllRegistersLive();
void BindAccumulator(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -57,7 +56,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
- bool owner_has_exception);
+ bool owner_has_exception,
+ const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
Node* GetControlDependency() const { return control_dependency_; }
@@ -68,30 +68,28 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* Context() const { return context_; }
void SetContext(Node* new_context) { context_ = new_context; }
- Environment* CopyForConditional();
- Environment* CopyForLoop();
- Environment* CopyForOsrEntry();
+ Environment* Copy();
void Merge(Environment* other);
- void PrepareForOsrEntry();
- void PrepareForLoopExit(Node* loop);
+ void PrepareForOsrEntry();
+ void PrepareForLoop(const BytecodeLoopAssignments& assignments);
+ void PrepareForLoopExit(Node* loop,
+ const BytecodeLoopAssignments& assignments);
private:
- Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
- void PrepareForLoop();
+ explicit Environment(const Environment* copy);
- bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
- void UpdateStateValues(Node** state_values, int offset, int count);
+ bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
+ void UpdateStateValues(Node** state_values, Node** values, int count);
+ void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
+ const BitVector* liveness);
int RegisterToValuesIndex(interpreter::Register the_register) const;
- bool IsLivenessBlockConsistent() const;
-
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
CommonOperatorBuilder* common() const { return builder_->common(); }
BytecodeGraphBuilder* builder() const { return builder_; }
- LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
const NodeVector* values() const { return &values_; }
NodeVector* values() { return &values_; }
int register_base() const { return register_base_; }
@@ -100,7 +98,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
BytecodeGraphBuilder* builder_;
int register_count_;
int parameter_count_;
- LivenessAnalyzerBlock* liveness_block_;
Node* context_;
Node* control_dependency_;
Node* effect_dependency_;
@@ -124,9 +121,6 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
: builder_(builder),
register_count_(register_count),
parameter_count_(parameter_count),
- liveness_block_(builder->is_liveness_analysis_enabled_
- ? builder_->liveness_analyzer()->NewBlock()
- : nullptr),
context_(context),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
@@ -161,12 +155,10 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
}
BytecodeGraphBuilder::Environment::Environment(
- const BytecodeGraphBuilder::Environment* other,
- LivenessAnalyzerBlock* liveness_block)
+ const BytecodeGraphBuilder::Environment* other)
: builder_(other->builder_),
register_count_(other->register_count_),
parameter_count_(other->parameter_count_),
- liveness_block_(liveness_block),
context_(other->context_),
control_dependency_(other->control_dependency_),
effect_dependency_(other->effect_dependency_),
@@ -189,16 +181,7 @@ int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
}
}
-bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
- return !builder_->IsLivenessAnalysisEnabled() ==
- (liveness_block() == nullptr);
-}
-
Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->LookupAccumulator();
- }
return values()->at(accumulator_base_);
}
@@ -213,32 +196,15 @@ Node* BytecodeGraphBuilder::Environment::LookupRegister(
return builder()->GetNewTarget();
} else {
int values_index = RegisterToValuesIndex(the_register);
- if (liveness_block() != nullptr && !the_register.is_parameter()) {
- DCHECK(IsLivenessBlockConsistent());
- liveness_block()->Lookup(the_register.index());
- }
return values()->at(values_index);
}
}
-void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- for (int i = 0; i < register_count(); ++i) {
- liveness_block()->Lookup(i);
- }
- }
-}
-
void BytecodeGraphBuilder::Environment::BindAccumulator(
Node* node, FrameStateAttachmentMode mode) {
if (mode == FrameStateAttachmentMode::kAttachFrameState) {
builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
}
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->BindAccumulator();
- }
values()->at(accumulator_base_) = node;
}
@@ -251,10 +217,6 @@ void BytecodeGraphBuilder::Environment::BindRegister(
accumulator_base_ - values_index));
}
values()->at(values_index) = node;
- if (liveness_block() != nullptr && !the_register.is_parameter()) {
- DCHECK(IsLivenessBlockConsistent());
- liveness_block()->Bind(the_register.index());
- }
}
void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
@@ -278,45 +240,13 @@ void BytecodeGraphBuilder::Environment::RecordAfterState(
}
}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForLoop() {
- PrepareForLoop();
- if (liveness_block() != nullptr) {
- // Finish the current block before copying.
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, liveness_block());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
- return new (zone())
- Environment(this, builder_->liveness_analyzer()->NewBlock());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() {
- LivenessAnalyzerBlock* copy_liveness_block = nullptr;
- if (liveness_block() != nullptr) {
- copy_liveness_block =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, copy_liveness_block);
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::Environment::Copy() {
+ return new (zone()) Environment(this);
}
void BytecodeGraphBuilder::Environment::Merge(
BytecodeGraphBuilder::Environment* other) {
- if (builder_->is_liveness_analysis_enabled_) {
- if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
- liveness_block_ =
- builder()->liveness_analyzer()->NewBlock(liveness_block());
- }
- liveness_block()->AddPredecessor(other->liveness_block());
- }
-
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder()->MergeControl(GetControlDependency(),
@@ -337,8 +267,8 @@ void BytecodeGraphBuilder::Environment::Merge(
}
}
-
-void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+void BytecodeGraphBuilder::Environment::PrepareForLoop(
+ const BytecodeLoopAssignments& assignments) {
// Create a control node for the loop header.
Node* control = builder()->NewLoop();
@@ -346,11 +276,23 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop() {
Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
UpdateEffectDependency(effect);
- // Assume everything in the loop is updated.
+ // Create Phis for any values that may be updated by the end of the loop.
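+  // Values that the loop never assigns keep their original node and need no
+  // Phi; the assignment set computed by BytecodeAnalysis tells us which ones.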
context_ = builder()->NewPhi(1, context_, control);
- int size = static_cast<int>(values()->size());
- for (int i = 0; i < size; i++) {
- values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+ for (int i = 0; i < parameter_count(); i++) {
+ if (assignments.ContainsParameter(i)) {
+ values_[i] = builder()->NewPhi(1, values_[i], control);
+ }
+ }
+ for (int i = 0; i < register_count(); i++) {
+ if (assignments.ContainsLocal(i)) {
+ int index = register_base() + i;
+ values_[index] = builder()->NewPhi(1, values_[index], control);
+ }
+ }
+
+ if (assignments.ContainsAccumulator()) {
+ values_[accumulator_base()] =
+ builder()->NewPhi(1, values_[accumulator_base()], control);
}
// Connect to the loop end.
@@ -384,7 +326,7 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
BailoutId loop_id(builder_->bytecode_iterator().current_offset());
Node* frame_state =
- Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+ Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
Node* checkpoint =
graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
UpdateEffectDependency(checkpoint);
@@ -402,22 +344,22 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
}
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
- Node** state_values, int offset, int count) {
+ Node** state_values, Node** values, int count) {
if (*state_values == nullptr) {
return true;
}
- DCHECK_EQ((*state_values)->InputCount(), count);
- DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ Node::Inputs inputs = (*state_values)->inputs();
+ DCHECK_EQ(inputs.count(), count);
for (int i = 0; i < count; i++) {
- if ((*state_values)->InputAt(i) != env_values[i]) {
+ if (inputs[i] != values[i]) {
return true;
}
}
return false;
}
-void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
+ Node* loop, const BytecodeLoopAssignments& assignments) {
DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
Node* control = GetControlDependency();
@@ -431,34 +373,65 @@ void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
GetEffectDependency(), loop_exit);
UpdateEffectDependency(effect_rename);
- // TODO(jarin) We should also rename context here. However, uncoditional
+ // TODO(jarin) We should also rename context here. However, unconditional
// renaming confuses global object and native context specialization.
// We should only rename if the context is assigned in the loop.
- // Rename the environmnent values.
- for (size_t i = 0; i < values_.size(); i++) {
- Node* rename =
- graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
- values_[i] = rename;
+ // Rename the environment values if they were assigned in the loop.
+ for (int i = 0; i < parameter_count(); i++) {
+ if (assignments.ContainsParameter(i)) {
+ Node* rename =
+ graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+ values_[i] = rename;
+ }
+ }
+ for (int i = 0; i < register_count(); i++) {
+ if (assignments.ContainsLocal(i)) {
+ Node* rename = graph()->NewNode(common()->LoopExitValue(),
+ values_[register_base() + i], loop_exit);
+ values_[register_base() + i] = rename;
+ }
+ }
+
+ if (assignments.ContainsAccumulator()) {
+ Node* rename = graph()->NewNode(common()->LoopExitValue(),
+ values_[accumulator_base()], loop_exit);
+ values_[accumulator_base()] = rename;
}
}
void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
- int offset,
+ Node** values,
int count) {
- if (StateValuesRequireUpdate(state_values, offset, count)) {
- const Operator* op = common()->StateValues(count);
- (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+ if (StateValuesRequireUpdate(state_values, values, count)) {
+ const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
+ (*state_values) = graph()->NewNode(op, count, values);
}
}
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+ Node** state_values, Node** values, int count, const BitVector* liveness) {
+ *state_values = builder_->state_values_cache_.GetNodeForValues(
+ values, static_cast<size_t>(count), liveness);
+}
+
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
- bool owner_has_exception) {
- UpdateStateValues(&parameters_state_values_, 0, parameter_count());
- UpdateStateValues(&registers_state_values_, register_base(),
- register_count());
- UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+ bool owner_has_exception, const BytecodeLivenessState* liveness) {
+ UpdateStateValues(&parameters_state_values_, &values()->at(0),
+ parameter_count());
+
+ // TODO(leszeks): We should pass a view of the liveness bitvector here, with
+ // offset and count, rather than passing the entire bitvector and assuming
+ // that register liveness starts at offset 0.
+ UpdateStateValuesWithCache(&registers_state_values_,
+ &values()->at(register_base()), register_count(),
+ liveness ? &liveness->bit_vector() : nullptr);
+
+ Node* accumulator_value = liveness == nullptr || liveness->AccumulatorIsLive()
+ ? values()->at(accumulator_base())
+ : builder()->jsgraph()->OptimizedOutConstant();
+ UpdateStateValues(&accumulator_state_values_, &accumulator_value, 1);
const Operator* op = common()->FrameState(
bailout_id, combine, builder()->frame_state_function_info());
@@ -467,51 +440,40 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
builder()->graph()->start());
- if (liveness_block() != nullptr) {
- // If the owning node has an exception, register the checkpoint to the
- // predecessor so that the checkpoint is used for both the normal and the
- // exceptional paths. Yes, this is a terrible hack and we might want
- // to use an explicit frame state for the exceptional path.
- if (owner_has_exception) {
- liveness_block()->GetPredecessor()->Checkpoint(result);
- } else {
- liveness_block()->Checkpoint(result);
- }
- }
-
return result;
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency, SourcePositionTable* source_positions,
- int inlining_id)
+ Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+ Handle<TypeFeedbackVector> feedback_vector, BailoutId osr_ast_id,
+ JSGraph* jsgraph, float invocation_frequency,
+ SourcePositionTable* source_positions, int inlining_id)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
- bytecode_array_(handle(info->shared_info()->bytecode_array())),
+ bytecode_array_(handle(shared_info->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
- feedback_vector_(handle(info->closure()->feedback_vector())),
+ feedback_vector_(feedback_vector),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
- bytecode_array()->register_count(), info->shared_info())),
- osr_ast_id_(info->osr_ast_id()),
+ bytecode_array()->register_count(), shared_info)),
+ bytecode_iterator_(nullptr),
+ bytecode_analysis_(nullptr),
+ environment_(nullptr),
+ osr_ast_id_(osr_ast_id),
+ osr_loop_offset_(-1),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_controls_(local_zone),
- is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
- info->is_deoptimization_enabled()),
+ is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness),
state_values_cache_(jsgraph),
- liveness_analyzer_(
- static_cast<size_t>(bytecode_array()->register_count()), true,
- local_zone),
source_positions_(source_positions),
- start_position_(info->shared_info()->start_position(), inlining_id) {}
+ start_position_(shared_info->start_position(), inlining_id) {}
Node* BytecodeGraphBuilder::GetNewTarget() {
if (!new_target_.is_set()) {
@@ -551,8 +513,10 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
const Operator* op =
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
- Node* native_context = NewNode(op, environment()->Context());
- return NewNode(javascript()->LoadContext(0, index, true), native_context);
+ Node* native_context = NewNode(op);
+ Node* result = NewNode(javascript()->LoadContext(0, index, true));
+ NodeProperties::ReplaceContextInput(result, native_context);
+ return result;
}
@@ -587,8 +551,6 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
- ClearNonLiveSlotsInFrameStates();
-
return true;
}
@@ -601,8 +563,13 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
+
+ const BytecodeLivenessState* liveness_before =
+ bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset());
+
Node* frame_state_before = environment()->Checkpoint(
- bailout_id, OutputFrameStateCombine::Ignore(), false);
+ bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
}
}
@@ -617,40 +584,36 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
bool has_exception = NodeProperties::IsExceptionalCall(node);
- Node* frame_state_after =
- environment()->Checkpoint(bailout_id, combine, has_exception);
- NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
- }
-}
-void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
- if (!IsLivenessAnalysisEnabled()) {
- return;
- }
- NonLiveFrameStateSlotReplacer replacer(
- &state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), true, local_zone());
- liveness_analyzer()->Run(&replacer);
- if (FLAG_trace_environment_liveness) {
- OFStream os(stdout);
- liveness_analyzer()->Print(os);
+ const BytecodeLivenessState* liveness_after =
+ bytecode_analysis()->GetOutLivenessFor(
+ bytecode_iterator().current_offset());
+
+ Node* frame_state_after = environment()->Checkpoint(
+ bailout_id, combine, has_exception, liveness_after);
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
- BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
- BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
- analysis.Analyze();
- loop_analysis.Analyze();
- set_branch_analysis(&analysis);
- set_loop_analysis(&loop_analysis);
+ BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
+ FLAG_analyze_environment_liveness);
+ bytecode_analysis.Analyze(osr_ast_id_);
+ set_bytecode_analysis(&bytecode_analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
SourcePositionTableIterator source_position_iterator(
bytecode_array()->source_position_table());
+ if (FLAG_trace_environment_liveness) {
+ OFStream of(stdout);
+
+ bytecode_analysis.PrintLivenessTo(of);
+ }
+
BuildOSRNormalEntryPoint();
+
for (; !iterator.done(); iterator.Advance()) {
int current_offset = iterator.current_offset();
UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
@@ -658,7 +621,6 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
SwitchToMergeEnvironment(current_offset);
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
- BuildOSRLoopEntryPoint(current_offset);
// Skip the first stack check if stack_check is false
if (!stack_check &&
@@ -677,8 +639,7 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
}
}
}
-
- set_branch_analysis(nullptr);
+ set_bytecode_analysis(nullptr);
set_bytecode_iterator(nullptr);
DCHECK(exception_handlers_.empty());
}
@@ -741,27 +702,33 @@ void BytecodeGraphBuilder::VisitMov() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
+ uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
feedback_vector()->GetKind(feedback.slot()));
- Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- return NewNode(op, GetFunctionClosure());
+ return NewNode(op);
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
- TypeofMode::NOT_INSIDE_TYPEOF);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node =
+ BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
- TypeofMode::INSIDE_TYPEOF);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node =
+ BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -774,7 +741,7 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
Node* value = environment()->LookupAccumulator();
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
- Node* node = NewNode(op, value, GetFunctionClosure());
+ Node* node = NewNode(op, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -786,6 +753,23 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
BuildStoreGlobal(LanguageMode::STRICT);
}
+void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
+ PrepareEagerCheckpoint();
+
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* name =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* value = environment()->LookupAccumulator();
+ int flags = bytecode_iterator().GetFlagOperand(2);
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+
+ const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
+ Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitLdaContextSlot() {
// TODO(mythria): immutable flag is also set to false. This information is not
// available in bytecode array. update this code when the implementation
@@ -793,9 +777,10 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
const Operator* op = javascript()->LoadContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
bytecode_iterator().GetIndexOperand(1), false);
+ Node* node = NewNode(op);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node = NewNode(op, context);
+ NodeProperties::ReplaceContextInput(node, context);
environment()->BindAccumulator(node);
}
@@ -805,8 +790,7 @@ void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
// changes.
const Operator* op = javascript()->LoadContext(
0, bytecode_iterator().GetIndexOperand(0), false);
- Node* context = environment()->Context();
- Node* node = NewNode(op, context);
+ Node* node = NewNode(op);
environment()->BindAccumulator(node);
}
@@ -814,18 +798,18 @@ void BytecodeGraphBuilder::VisitStaContextSlot() {
const Operator* op = javascript()->StoreContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
bytecode_iterator().GetIndexOperand(1));
+ Node* value = environment()->LookupAccumulator();
+ Node* node = NewNode(op, value);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* value = environment()->LookupAccumulator();
- NewNode(op, context, value);
+ NodeProperties::ReplaceContextInput(node, context);
}
void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
const Operator* op =
javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
- Node* context = environment()->Context();
Node* value = environment()->LookupAccumulator();
- NewNode(op, context, value);
+ NewNode(op, value);
}
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
@@ -857,15 +841,14 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
// the same scope as the variable itself has no way of shadowing it.
for (uint32_t d = 0; d < depth; d++) {
Node* extension_slot =
- NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false));
Node* check_no_extension =
NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
extension_slot, jsgraph()->TheHoleConstant());
NewBranch(check_no_extension);
- Environment* true_environment = environment()->CopyForConditional();
+ Environment* true_environment = environment()->Copy();
{
NewIfFalse();
@@ -904,8 +887,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
const Operator* op = javascript()->LoadContext(depth, slot_index, false);
- Node* context = environment()->Context();
- environment()->BindAccumulator(NewNode(op, context));
+ environment()->BindAccumulator(NewNode(op));
}
// Only build the slow path if there were any slow-path checks.
@@ -950,8 +932,10 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Node* node =
- BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1018,7 +1002,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
+ Node* node = NewNode(op, object);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1031,7 +1015,7 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ Node* node = NewNode(op, object, key);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1046,7 +1030,7 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
- Node* node = NewNode(op, object, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1069,7 +1053,7 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, key, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1084,9 +1068,8 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
void BytecodeGraphBuilder::VisitLdaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module =
- NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ Node* module = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
Node* value = NewNode(javascript()->LoadModule(cell_index), module);
environment()->BindAccumulator(value);
}
@@ -1094,9 +1077,8 @@ void BytecodeGraphBuilder::VisitLdaModuleVariable() {
void BytecodeGraphBuilder::VisitStaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module =
- NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ Node* module = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
Node* value = environment()->LookupAccumulator();
NewNode(javascript()->StoreModule(cell_index), module, value);
}
@@ -1117,12 +1099,14 @@ void BytecodeGraphBuilder::VisitPopContext() {
void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
PretenureFlag tenured =
interpreter::CreateClosureFlags::PretenuredBit::decode(
- bytecode_iterator().GetFlagOperand(1))
+ bytecode_iterator().GetFlagOperand(2))
? TENURED
: NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+ const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
@@ -1138,7 +1122,15 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
- const Operator* op = javascript()->CreateFunctionContext(slots);
+ const Operator* op =
+ javascript()->CreateFunctionContext(slots, FUNCTION_SCOPE);
+ Node* context = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateEvalContext() {
+ uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
+ const Operator* op = javascript()->CreateFunctionContext(slots, EVAL_SCOPE);
Node* context = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(context);
}
@@ -1198,16 +1190,21 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ConstantElementsPair> constant_elements =
+ Handle<ConstantElementsPair>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+ int literal_flags =
+ interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
// Disable allocation site mementos. Only unoptimized code will collect
// feedback about allocation site. Once the code is optimized we expect the
// data to converge. So, we disable allocation site mementos in optimized
// code. We can revisit this when we have data to the contrary.
literal_flags |= ArrayLiteral::kDisableMementos;
- int number_of_elements = constant_elements->length();
+ // TODO(mstarzinger): Thread through number of elements. The below number is
+ // only an estimate and does not match {ArrayLiteral::values::length}.
+ int number_of_elements = constant_elements->constant_values()->length();
Node* literal = NewNode(
javascript()->CreateLiteralArray(constant_elements, literal_flags,
literal_index, number_of_elements),
@@ -1223,7 +1220,8 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
int literal_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
- // TODO(mstarzinger): Thread through number of properties.
+ // TODO(mstarzinger): Thread through number of properties. The below number is
+ // only an estimate and does not match {ObjectLiteral::properties_count}.
int number_of_properties = constant_properties->length() / 2;
Node* literal = NewNode(
javascript()->CreateLiteralObject(constant_properties, literal_flags,
@@ -1340,6 +1338,17 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitNewWithSpread() {
+ PrepareEagerCheckpoint();
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(0);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(1);
+
+ const Operator* op =
+ javascript()->CallConstructWithSpread(static_cast<int>(arg_count));
+ Node* value = ProcessCallRuntimeArguments(op, first_arg, arg_count);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
@@ -1607,6 +1616,13 @@ void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
BuildDelete(LanguageMode::SLOPPY);
}
+void BytecodeGraphBuilder::VisitGetSuperConstructor() {
+ Node* node = NewNode(javascript()->GetSuperConstructor(),
+ environment()->LookupAccumulator());
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+ Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
PrepareEagerCheckpoint();
Node* left =
@@ -1652,8 +1668,30 @@ void BytecodeGraphBuilder::VisitTestInstanceOf() {
BuildCompareOp(javascript()->InstanceOf());
}
+void BytecodeGraphBuilder::VisitTestUndetectable() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* node = NewNode(jsgraph()->simplified()->ObjectIsUndetectable(), object);
+ environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitTestNull() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ object, jsgraph()->NullConstant());
+ environment()->BindAccumulator(result);
+}
+
+void BytecodeGraphBuilder::VisitTestUndefined() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ object, jsgraph()->UndefinedConstant());
+ environment()->BindAccumulator(result);
+}
+
void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
- PrepareEagerCheckpoint();
Node* value = NewNode(js_op, environment()->LookupAccumulator());
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
Environment::kAttachFrameState);
@@ -1705,6 +1743,12 @@ void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
BuildJumpIfNotHole();
}
+void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
+
+void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
+ BuildJumpIfJSReceiver();
+}
+
void BytecodeGraphBuilder::VisitJumpIfNull() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
@@ -1729,6 +1773,12 @@ void BytecodeGraphBuilder::VisitStackCheck() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitSetPendingMessage() {
+ Node* previous_message = NewNode(javascript()->LoadMessage());
+ NewNode(javascript()->StoreMessage(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(previous_message);
+}
+
void BytecodeGraphBuilder::VisitReturn() {
BuildLoopExitsForFunctionExit();
Node* pop_node = jsgraph()->ZeroConstant();
@@ -1742,7 +1792,6 @@ void BytecodeGraphBuilder::VisitDebugger() {
Node* call =
NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
environment()->BindAccumulator(call, Environment::kAttachFrameState);
- environment()->MarkAllRegistersLive();
}
// We cannot create a graph from the debugger copy of the bytecode array.
@@ -1866,33 +1915,43 @@ void BytecodeGraphBuilder::VisitIllegal() {
void BytecodeGraphBuilder::VisitNop() {}
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
- if (merge_environments_[current_offset] != nullptr) {
+ auto it = merge_environments_.find(current_offset);
+ if (it != merge_environments_.end()) {
if (environment() != nullptr) {
- merge_environments_[current_offset]->Merge(environment());
+ it->second->Merge(environment());
}
- set_environment(merge_environments_[current_offset]);
+ set_environment(it->second);
}
}
void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
- if (branch_analysis()->backward_branches_target(current_offset)) {
- // Add loop header and store a copy so we can connect merged back
- // edge inputs to the loop header.
- merge_environments_[current_offset] = environment()->CopyForLoop();
+ if (bytecode_analysis()->IsLoopHeader(current_offset)) {
+ const LoopInfo& loop_info =
+ bytecode_analysis()->GetLoopInfoFor(current_offset);
+
+ // Add loop header.
+ environment()->PrepareForLoop(loop_info.assignments());
+
+ BuildOSRLoopEntryPoint(current_offset);
+
+ // Store a copy of the environment so we can connect merged back edge inputs
+ // to the loop header.
+ merge_environments_[current_offset] = environment()->Copy();
}
}
void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
BuildLoopExitsForBranch(target_offset);
- if (merge_environments_[target_offset] == nullptr) {
+ Environment*& merge_environment = merge_environments_[target_offset];
+ if (merge_environment == nullptr) {
// Append merge nodes to the environment. We may merge here with another
     // environment. So add a placeholder for merge nodes. We may add redundant
     // merges, but they will be eliminated in a later pass.
// TODO(mstarzinger): Be smarter about this!
NewMerge();
- merge_environments_[target_offset] = environment();
+ merge_environment = environment();
} else {
- merge_environments_[target_offset]->Merge(environment());
+ merge_environment->Merge(environment());
}
set_environment(nullptr);
}
@@ -1903,13 +1962,14 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
}
void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
- if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+ DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
+
+ if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
// For OSR add a special {OsrLoopEntry} node into the current loop header.
// It will be turned into a usable entry by the OSR deconstruction.
- Environment* loop_env = merge_environments_[current_offset];
- Environment* osr_env = loop_env->CopyForOsrEntry();
+ Environment* osr_env = environment()->Copy();
osr_env->PrepareForOsrEntry();
- loop_env->Merge(osr_env);
+ environment()->Merge(osr_env);
}
}
@@ -1918,9 +1978,11 @@ void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
  // For OSR add an {OsrNormalEntry} as the top-level environment start.
// It will be replaced with {Dead} by the OSR deconstruction.
NewNode(common()->OsrNormalEntry());
- // Note that the requested OSR entry point must be the target of a backward
- // branch, otherwise there will not be a proper loop header available.
- DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+ // Translate the offset of the jump instruction to the jump target offset of
+ // that instruction so that the derived BailoutId points to the loop header.
+ osr_loop_offset_ =
+ bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
+ DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
}
}
@@ -1928,17 +1990,20 @@ void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
int origin_offset = bytecode_iterator().current_offset();
// Only build loop exits for forward edges.
if (target_offset > origin_offset) {
- BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+ BuildLoopExitsUntilLoop(
+ bytecode_analysis()->GetLoopOffsetFor(target_offset));
}
}
void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
int origin_offset = bytecode_iterator().current_offset();
- int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
+ int current_loop = bytecode_analysis()->GetLoopOffsetFor(origin_offset);
while (loop_offset < current_loop) {
Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
- environment()->PrepareForLoopExit(loop_node);
- current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+ const LoopInfo& loop_info =
+ bytecode_analysis()->GetLoopInfoFor(current_loop);
+ environment()->PrepareForLoopExit(loop_node, loop_info.assignments());
+ current_loop = loop_info.parent_offset();
}
}
@@ -1952,7 +2017,7 @@ void BytecodeGraphBuilder::BuildJump() {
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition);
- Environment* if_false_environment = environment()->CopyForConditional();
+ Environment* if_false_environment = environment()->Copy();
NewIfTrue();
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
set_environment(if_false_environment);
@@ -1961,7 +2026,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
NewBranch(condition);
- Environment* if_true_environment = environment()->CopyForConditional();
+ Environment* if_true_environment = environment()->Copy();
NewIfFalse();
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
set_environment(if_true_environment);
@@ -2006,6 +2071,12 @@ void BytecodeGraphBuilder::BuildJumpIfNotHole() {
BuildJumpIfNot(condition);
}
+void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition = NewNode(simplified()->ObjectIsReceiver(), accumulator);
+ BuildJumpIf(condition);
+}
+
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
if (size > input_buffer_size_) {
size = size + kInputBufferSizeIncrement + input_buffer_size_;
@@ -2093,7 +2164,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
int handler_offset = exception_handlers_.top().handler_offset_;
int context_index = exception_handlers_.top().context_register_;
interpreter::Register context_register(context_index);
- Environment* success_env = environment()->CopyForConditional();
+ Environment* success_env = environment()->Copy();
const Operator* op = common()->IfException();
Node* effect = environment()->GetEffectDependency();
Node* on_exception = graph()->NewNode(op, effect, result);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 6994226dc3..6ca7d29152 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -5,12 +5,10 @@
#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/compiler/bytecode-loop-analysis.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
@@ -18,9 +16,6 @@
namespace v8 {
namespace internal {
-
-class CompilationInfo;
-
namespace compiler {
class SourcePositionTable;
@@ -29,8 +24,10 @@ class SourcePositionTable;
// interpreter bytecodes.
class BytecodeGraphBuilder {
public:
- BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ BytecodeGraphBuilder(Zone* local_zone, Handle<SharedFunctionInfo> shared,
+ Handle<TypeFeedbackVector> feedback_vector,
+ BailoutId osr_ast_id, JSGraph* jsgraph,
+ float invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
@@ -131,12 +128,9 @@ class BytecodeGraphBuilder {
// Conceptually this frame state is "after" a given operation.
void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
- // Computes register liveness and replaces dead ones in frame states with the
- // undefined values.
- void ClearNonLiveSlotsInFrameStates();
-
void BuildCreateArguments(CreateArgumentsType type);
- Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
+ Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+ TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
void BuildNamedStore(LanguageMode language_mode);
void BuildKeyedStore(LanguageMode language_mode);
@@ -181,6 +175,7 @@ class BytecodeGraphBuilder {
void BuildJumpIfToBooleanTrue();
void BuildJumpIfToBooleanFalse();
void BuildJumpIfNotHole();
+ void BuildJumpIfJSReceiver();
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
@@ -203,6 +198,10 @@ class BytecodeGraphBuilder {
// Simulates entry and exit of exception handlers.
void EnterAndExitExceptionHandlers(int current_offset);
+ // Update the current position of the {SourcePositionTable} to that of the
+ // bytecode at {offset}, if any.
+ void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -224,6 +223,9 @@ class BytecodeGraphBuilder {
Zone* graph_zone() const { return graph()->zone(); }
JSGraph* jsgraph() const { return jsgraph_; }
JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph_->simplified();
+ }
Zone* local_zone() const { return local_zone_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
@@ -247,22 +249,14 @@ class BytecodeGraphBuilder {
bytecode_iterator_ = bytecode_iterator;
}
- const BytecodeBranchAnalysis* branch_analysis() const {
- return branch_analysis_;
+ const BytecodeAnalysis* bytecode_analysis() const {
+ return bytecode_analysis_;
}
- void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
- branch_analysis_ = branch_analysis;
+ void set_bytecode_analysis(const BytecodeAnalysis* bytecode_analysis) {
+ bytecode_analysis_ = bytecode_analysis;
}
- const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
-
- void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
- loop_analysis_ = loop_analysis;
- }
-
- LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
-
bool IsLivenessAnalysisEnabled() const {
return this->is_liveness_analysis_enabled_;
}
@@ -279,10 +273,10 @@ class BytecodeGraphBuilder {
Handle<TypeFeedbackVector> feedback_vector_;
const FrameStateFunctionInfo* frame_state_function_info_;
const interpreter::BytecodeArrayIterator* bytecode_iterator_;
- const BytecodeBranchAnalysis* branch_analysis_;
- const BytecodeLoopAnalysis* loop_analysis_;
+ const BytecodeAnalysis* bytecode_analysis_;
Environment* environment_;
BailoutId osr_ast_id_;
+ int osr_loop_offset_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
@@ -309,18 +303,11 @@ class BytecodeGraphBuilder {
StateValuesCache state_values_cache_;
- // Analyzer of register liveness.
- LivenessAnalyzer liveness_analyzer_;
-
- // The Turbofan source position table, to be populated.
+ // The source position table, to be populated.
SourcePositionTable* source_positions_;
SourcePosition const start_position_;
- // Update [source_positions_]'s current position to that of the bytecode at
- // [offset], if any.
- void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
-
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.cc b/deps/v8/src/compiler/bytecode-liveness-map.cc
new file mode 100644
index 0000000000..ba98dec6e5
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-liveness-map.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-liveness-map.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLiveness::BytecodeLiveness(int register_count, Zone* zone)
+ : in(new (zone) BytecodeLivenessState(register_count, zone)),
+ out(new (zone) BytecodeLivenessState(register_count, zone)) {}
+
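+// The map is sized to roughly one liveness entry per four bytes of bytecode,
+// rounded up to a power of two; this is a heuristic initial capacity, not a
+// limit.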
+BytecodeLivenessMap::BytecodeLivenessMap(int bytecode_size, Zone* zone)
+ : liveness_map_(base::bits::RoundUpToPowerOfTwo32(bytecode_size / 4 + 1),
+ base::KeyEqualityMatcher<int>(),
+ ZoneAllocationPolicy(zone)) {}
+
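+// Bytecode offsets are small, dense integers, so the identity function is an
+// adequate hash.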
+uint32_t OffsetHash(int offset) { return offset; }
+
+BytecodeLiveness& BytecodeLivenessMap::InitializeLiveness(int offset,
+ int register_count,
+ Zone* zone) {
+ return liveness_map_
+ .LookupOrInsert(offset, OffsetHash(offset),
+ [&]() { return BytecodeLiveness(register_count, zone); },
+ ZoneAllocationPolicy(zone))
+ ->value;
+}
+
+BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) {
+ return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+const BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) const {
+ return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
new file mode 100644
index 0000000000..03251f1367
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
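+// Liveness of the interpreter registers and the accumulator at one point in
+// the bytecode, stored as a bit vector: bit i tracks register i, and the last
+// bit tracks the accumulator.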
+class BytecodeLivenessState : public ZoneObject {
+ public:
+ BytecodeLivenessState(int register_count, Zone* zone)
+ : bit_vector_(register_count + 1, zone) {}
+
+ const BitVector& bit_vector() const { return bit_vector_; }
+
+ BitVector& bit_vector() { return bit_vector_; }
+
+ bool RegisterIsLive(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ return bit_vector_.Contains(index);
+ }
+
+ bool AccumulatorIsLive() const {
+ return bit_vector_.Contains(bit_vector_.length() - 1);
+ }
+
+ bool Equals(const BytecodeLivenessState& other) const {
+ return bit_vector_.Equals(other.bit_vector_);
+ }
+
+ void MarkRegisterLive(int index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ bit_vector_.Add(index);
+ }
+
+ void MarkRegisterDead(int index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ bit_vector_.Remove(index);
+ }
+
+ void MarkAccumulatorLive() { bit_vector_.Add(bit_vector_.length() - 1); }
+
+ void MarkAccumulatorDead() { bit_vector_.Remove(bit_vector_.length() - 1); }
+
+ void MarkAllLive() { bit_vector_.AddAll(); }
+
+ void Union(const BytecodeLivenessState& other) {
+ bit_vector_.Union(other.bit_vector_);
+ }
+
+ bool UnionIsChanged(const BytecodeLivenessState& other) {
+ return bit_vector_.UnionIsChanged(other.bit_vector_);
+ }
+
+ void CopyFrom(const BytecodeLivenessState& other) {
+ bit_vector_.CopyFrom(other.bit_vector_);
+ }
+
+ private:
+ BitVector bit_vector_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
+};
+
+struct BytecodeLiveness {
+ BytecodeLivenessState* in;
+ BytecodeLivenessState* out;
+
+ BytecodeLiveness(int register_count, Zone* zone);
+};
+
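+// Maps each bytecode offset to the liveness state immediately before ("in")
+// and immediately after ("out") the bytecode at that offset.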
+class V8_EXPORT_PRIVATE BytecodeLivenessMap {
+ public:
+ BytecodeLivenessMap(int size, Zone* zone);
+
+ BytecodeLiveness& InitializeLiveness(int offset, int register_count,
+ Zone* zone);
+
+ BytecodeLiveness& GetLiveness(int offset);
+ const BytecodeLiveness& GetLiveness(int offset) const;
+
+ BytecodeLivenessState* GetInLiveness(int offset) {
+ return GetLiveness(offset).in;
+ }
+ const BytecodeLivenessState* GetInLiveness(int offset) const {
+ return GetLiveness(offset).in;
+ }
+
+ BytecodeLivenessState* GetOutLiveness(int offset) {
+ return GetLiveness(offset).out;
+ }
+ const BytecodeLivenessState* GetOutLiveness(int offset) const {
+ return GetLiveness(offset).out;
+ }
+
+ private:
+ base::TemplateHashMapImpl<int, BytecodeLiveness,
+ base::KeyEqualityMatcher<int>, ZoneAllocationPolicy>
+ liveness_map_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.cc b/deps/v8/src/compiler/bytecode-loop-analysis.cc
deleted file mode 100644
index 03c11f7196..0000000000
--- a/deps/v8/src/compiler/bytecode-loop-analysis.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-loop-analysis.h"
-
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeLoopAnalysis::BytecodeLoopAnalysis(
- Handle<BytecodeArray> bytecode_array,
- const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
- : bytecode_array_(bytecode_array),
- branch_analysis_(branch_analysis),
- zone_(zone),
- current_loop_offset_(-1),
- found_current_backedge_(false),
- backedge_to_header_(zone),
- loop_header_to_parent_(zone) {}
-
-void BytecodeLoopAnalysis::Analyze() {
- current_loop_offset_ = -1;
- found_current_backedge_ = false;
- interpreter::BytecodeArrayIterator iterator(bytecode_array());
- while (!iterator.done()) {
- interpreter::Bytecode bytecode = iterator.current_bytecode();
- int current_offset = iterator.current_offset();
- if (branch_analysis_->backward_branches_target(current_offset)) {
- AddLoopEntry(current_offset);
- } else if (interpreter::Bytecodes::IsJump(bytecode)) {
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- }
- iterator.Advance();
- }
-}
-
-void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
- if (found_current_backedge_) {
- // We assume that all backedges of a loop must occur together and before
- // another loop entry or an outer loop backedge.
- // This is guaranteed by the invariants from AddBranch, such that every
- // backedge must either go to the current loop or be the first of the
- // backedges to the parent loop.
- // Thus here, the current loop actually ended before and we have a loop
- // with the same parent.
- current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
- found_current_backedge_ = false;
- }
- loop_header_to_parent_[entry_offset] = current_loop_offset_;
- current_loop_offset_ = entry_offset;
-}
-
-void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
- // If this is a backedge, record it.
- if (target_offset < origin_offset) {
- backedge_to_header_[origin_offset] = target_offset;
- // Check whether this is actually a backedge of the outer loop and we have
- // already finished the current loop.
- if (target_offset < current_loop_offset_) {
- DCHECK(found_current_backedge_);
- int parent_offset = loop_header_to_parent_[current_loop_offset_];
- DCHECK_EQ(target_offset, parent_offset);
- current_loop_offset_ = parent_offset;
- } else {
- DCHECK_EQ(target_offset, current_loop_offset_);
- found_current_backedge_ = true;
- }
- }
-}
-
-int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
- auto next_backedge = backedge_to_header_.lower_bound(offset);
- // If there is no next backedge => offset is not in a loop.
- if (next_backedge == backedge_to_header_.end()) {
- return -1;
- }
- // If the header precedes the offset, it is the backedge of the containing
- // loop.
- if (next_backedge->second <= offset) {
- return next_backedge->second;
- }
- // Otherwise there is a nested loop after this offset. We just return the
- // parent of the next nested loop.
- return loop_header_to_parent_.upper_bound(offset)->second;
-}
-
-int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
- auto parent = loop_header_to_parent_.find(header_offset);
- DCHECK(parent != loop_header_to_parent_.end());
- return parent->second;
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.h b/deps/v8/src/compiler/bytecode-loop-analysis.h
deleted file mode 100644
index 1a86d7b81f..0000000000
--- a/deps/v8/src/compiler/bytecode-loop-analysis.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-
-#include "src/handles.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-class BytecodeBranchAnalysis;
-
-class BytecodeLoopAnalysis BASE_EMBEDDED {
- public:
- BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
- const BytecodeBranchAnalysis* branch_analysis,
- Zone* zone);
-
- // Analyze the bytecodes to find the branch sites and their
- // targets. No other methods in this class return valid information
- // until this has been called.
- void Analyze();
-
- // Get the loop header offset of the containing loop for arbitrary
- // {offset}, or -1 if the {offset} is not inside any loop.
- int GetLoopOffsetFor(int offset) const;
- // Gets the loop header offset of the parent loop of the loop header
- // at {header_offset}, or -1 for outer-most loops.
- int GetParentLoopFor(int header_offset) const;
-
- private:
- void AddLoopEntry(int entry_offset);
- void AddBranch(int origin_offset, int target_offset);
-
- Zone* zone() const { return zone_; }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- Handle<BytecodeArray> bytecode_array_;
- const BytecodeBranchAnalysis* branch_analysis_;
- Zone* zone_;
-
- int current_loop_offset_;
- bool found_current_backedge_;
-
- // Map from the offset of a backedge jump to the offset of the corresponding
- // loop header. There might be multiple backedges for do-while loops.
- ZoneMap<int, int> backedge_to_header_;
- // Map from the offset of a loop header to the offset of its parent's loop
- // header. This map will have as many entries as there are loops in the
- // function.
- ZoneMap<int, int> loop_header_to_parent_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 3431098446..991ae3699d 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -22,15 +22,23 @@
#include "src/utils.h"
#include "src/zone/zone.h"
+#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
+#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
+#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
+#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
+#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
+#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
+#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
+
namespace v8 {
namespace internal {
namespace compiler {
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size)
- : CodeAssembler(
+CodeAssemblerState::CodeAssemblerState(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name, size_t result_size)
+ : CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
@@ -38,19 +46,20 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
MachineType::AnyTagged(), result_size),
flags, name) {}
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name)
- : CodeAssembler(isolate, zone,
- Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- Code::ExtractKindFromFlags(flags) == Code::BUILTIN
- ? CallDescriptor::kPushArgumentCount
- : CallDescriptor::kNoFlags),
- flags, name) {}
-
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor, Code::Flags flags,
- const char* name)
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+ int parameter_count, Code::Flags flags,
+ const char* name)
+ : CodeAssemblerState(isolate, zone,
+ Linkage::GetJSCallDescriptor(
+ zone, false, parameter_count,
+ Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+ ? CallDescriptor::kPushArgumentCount
+ : CallDescriptor::kNoFlags),
+ flags, name) {}
+
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor,
+ Code::Flags flags, const char* name)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
@@ -61,56 +70,109 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
code_generated_(false),
variables_(zone) {}
+CodeAssemblerState::~CodeAssemblerState() {}
+
+int CodeAssemblerState::parameter_count() const {
+ return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
+}
+
CodeAssembler::~CodeAssembler() {}
-void CodeAssembler::CallPrologue() {}
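+// A graph decorator that breaks into the debugger when a node with the given
+// id is added to the graph; installed by CodeAssembler::BreakOnNode() below.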
+class BreakOnNodeDecorator final : public GraphDecorator {
+ public:
+ explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
+
+ void Decorate(Node* node) final {
+ if (node->id() == node_id_) {
+ base::OS::DebugBreak();
+ }
+ }
+
+ private:
+ NodeId node_id_;
+};
+
+void CodeAssembler::BreakOnNode(int node_id) {
+ Graph* graph = raw_assembler()->graph();
+ Zone* zone = graph->zone();
+ GraphDecorator* decorator =
+ new (zone) BreakOnNodeDecorator(static_cast<NodeId>(node_id));
+ graph->AddDecorator(decorator);
+}
-void CodeAssembler::CallEpilogue() {}
+void CodeAssembler::RegisterCallGenerationCallbacks(
+ const CodeAssemblerCallback& call_prologue,
+ const CodeAssemblerCallback& call_epilogue) {
+ // The callbacks can be registered only once.
+ DCHECK(!state_->call_prologue_);
+ DCHECK(!state_->call_epilogue_);
+ state_->call_prologue_ = call_prologue;
+ state_->call_epilogue_ = call_epilogue;
+}
-Handle<Code> CodeAssembler::GenerateCode() {
- DCHECK(!code_generated_);
+void CodeAssembler::UnregisterCallGenerationCallbacks() {
+ state_->call_prologue_ = nullptr;
+ state_->call_epilogue_ = nullptr;
+}
- Schedule* schedule = raw_assembler_->Export();
+void CodeAssembler::CallPrologue() {
+ if (state_->call_prologue_) {
+ state_->call_prologue_();
+ }
+}
+
+void CodeAssembler::CallEpilogue() {
+ if (state_->call_epilogue_) {
+ state_->call_epilogue_();
+ }
+}
+
+// static
+Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
+ DCHECK(!state->code_generated_);
+
+ RawMachineAssembler* rasm = state->raw_assembler_.get();
+ Schedule* schedule = rasm->Export();
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
- schedule, flags_, name_);
+ rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+ state->flags_, state->name_);
- code_generated_ = true;
+ state->code_generated_ = true;
return code;
}
-bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
bool CodeAssembler::IsFloat64RoundUpSupported() const {
- return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+ return raw_assembler()->machine()->Float64RoundUp().IsSupported();
}
bool CodeAssembler::IsFloat64RoundDownSupported() const {
- return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+ return raw_assembler()->machine()->Float64RoundDown().IsSupported();
}
bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
- return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+ return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
}
bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
- return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+ return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
}
Node* CodeAssembler::Int32Constant(int32_t value) {
- return raw_assembler_->Int32Constant(value);
+ return raw_assembler()->Int32Constant(value);
}
Node* CodeAssembler::Int64Constant(int64_t value) {
- return raw_assembler_->Int64Constant(value);
+ return raw_assembler()->Int64Constant(value);
}
Node* CodeAssembler::IntPtrConstant(intptr_t value) {
- return raw_assembler_->IntPtrConstant(value);
+ return raw_assembler()->IntPtrConstant(value);
}
Node* CodeAssembler::NumberConstant(double value) {
- return raw_assembler_->NumberConstant(value);
+ return raw_assembler()->NumberConstant(value);
}
Node* CodeAssembler::SmiConstant(Smi* value) {
@@ -122,19 +184,19 @@ Node* CodeAssembler::SmiConstant(int value) {
}
Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
- return raw_assembler_->HeapConstant(object);
+ return raw_assembler()->HeapConstant(object);
}
Node* CodeAssembler::BooleanConstant(bool value) {
- return raw_assembler_->BooleanConstant(value);
+ return raw_assembler()->BooleanConstant(value);
}
Node* CodeAssembler::ExternalConstant(ExternalReference address) {
- return raw_assembler_->ExternalConstant(address);
+ return raw_assembler()->ExternalConstant(address);
}
Node* CodeAssembler::Float64Constant(double value) {
- return raw_assembler_->Float64Constant(value);
+ return raw_assembler()->Float64Constant(value);
}
Node* CodeAssembler::NaNConstant() {
@@ -174,24 +236,28 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
}
bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
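+ // Look through bitcasts between word and tagged representations, since they
+ // do not change the underlying constant value.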
+ if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
+ node->opcode() == IrOpcode::kBitcastWordToTagged) {
+ node = node->InputAt(0);
+ }
IntPtrMatcher m(node);
if (m.HasValue()) out_value = m.Value();
return m.HasValue();
}
Node* CodeAssembler::Parameter(int value) {
- return raw_assembler_->Parameter(value);
+ return raw_assembler()->Parameter(value);
}
void CodeAssembler::Return(Node* value) {
- return raw_assembler_->Return(value);
+ return raw_assembler()->Return(value);
}
void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
- return raw_assembler_->PopAndReturn(pop, value);
+ return raw_assembler()->PopAndReturn(pop, value);
}
-void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
void CodeAssembler::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
@@ -210,81 +276,118 @@ void CodeAssembler::Comment(const char* format, ...) {
MemCopy(copy + prefix_len, builder.Finalize(), length);
copy[0] = ';';
copy[1] = ' ';
- raw_assembler_->Comment(copy);
+ raw_assembler()->Comment(copy);
}
-void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+void CodeAssembler::Bind(Label* label) { return label->Bind(); }
Node* CodeAssembler::LoadFramePointer() {
- return raw_assembler_->LoadFramePointer();
+ return raw_assembler()->LoadFramePointer();
}
Node* CodeAssembler::LoadParentFramePointer() {
- return raw_assembler_->LoadParentFramePointer();
+ return raw_assembler()->LoadParentFramePointer();
}
Node* CodeAssembler::LoadStackPointer() {
- return raw_assembler_->LoadStackPointer();
+ return raw_assembler()->LoadStackPointer();
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name) \
Node* CodeAssembler::name(Node* a, Node* b) { \
- return raw_assembler_->name(a, b); \
+ return raw_assembler()->name(a, b); \
}
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
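+// IntPtrAdd and IntPtrSub constant-fold their inputs where possible and elide
+// no-op arithmetic with zero before emitting a machine node.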
+Node* CodeAssembler::IntPtrAdd(Node* left, Node* right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant + right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return raw_assembler()->IntPtrAdd(left, right);
+}
+
+Node* CodeAssembler::IntPtrSub(Node* left, Node* right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant - right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return raw_assembler()->IntPtrSub(left, right);
+}
+
Node* CodeAssembler::WordShl(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+ return (shift != 0) ? raw_assembler()->WordShl(value, IntPtrConstant(shift))
: value;
}
Node* CodeAssembler::WordShr(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+ return (shift != 0) ? raw_assembler()->WordShr(value, IntPtrConstant(shift))
: value;
}
Node* CodeAssembler::Word32Shr(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
+ return (shift != 0) ? raw_assembler()->Word32Shr(value, Int32Constant(shift))
: value;
}
Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- value = raw_assembler_->ChangeUint32ToUint64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ value = raw_assembler()->ChangeUint32ToUint64(value);
}
return value;
}
Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- value = raw_assembler_->ChangeInt32ToInt64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ value = raw_assembler()->ChangeInt32ToInt64(value);
}
return value;
}
Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- return raw_assembler_->RoundInt64ToFloat64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ return raw_assembler()->RoundInt64ToFloat64(value);
}
- return raw_assembler_->ChangeInt32ToFloat64(value);
+ return raw_assembler()->ChangeInt32ToFloat64(value);
}
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
- Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+ Node* CodeAssembler::name(Node* a) { return raw_assembler()->name(a); }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
Node* CodeAssembler::Load(MachineType rep, Node* base) {
- return raw_assembler_->Load(rep, base);
+ return raw_assembler()->Load(rep, base);
}
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->Load(rep, base, index);
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
+ return raw_assembler()->Load(rep, base, offset);
}
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->AtomicLoad(rep, base, index);
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
+ return raw_assembler()->AtomicLoad(rep, base, offset);
}
Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
@@ -303,28 +406,35 @@ Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
IntPtrConstant(root_index * kPointerSize));
}
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
- return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
+ kFullWriteBarrier);
+}
+
+Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+ value, kFullWriteBarrier);
}
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
- Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
+ Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+ value, kMapWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* value) {
- return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+ return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+ Node* offset, Node* value) {
+ return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->AtomicStore(rep, base, index, value);
+ Node* offset, Node* value) {
+ return raw_assembler()->AtomicStore(rep, base, offset, value);
}
Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
@@ -336,11 +446,11 @@ Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
}
Node* CodeAssembler::Retain(Node* value) {
- return raw_assembler_->Retain(value);
+ return raw_assembler()->Retain(value);
}
Node* CodeAssembler::Projection(int index, Node* value) {
- return raw_assembler_->Projection(index, value);
+ return raw_assembler()->Projection(index, value);
}
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
@@ -350,11 +460,11 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
exception.MergeVariables();
DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
- raw_assembler_->Continuations(node, success.label_, exception.label_);
+ raw_assembler()->Continuations(node, success.label_, exception.label_);
Bind(&exception);
- const Operator* op = raw_assembler_->common()->IfException();
- Node* exception_value = raw_assembler_->AddNode(op, node, node);
+ const Operator* op = raw_assembler()->common()->IfException();
+ Node* exception_value = raw_assembler()->AddNode(op, node, node);
if (exception_var != nullptr) {
exception_var->Bind(exception_value);
}
@@ -363,627 +473,155 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Bind(&success);
}
-Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
- CallEpilogue();
- return return_value;
-}
+template <class... TArgs>
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
+ TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, argc, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(desc->ReturnCount());
-Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+ Node* arity = Int32Constant(argc);
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
- CallEpilogue();
- return return_value;
-}
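+ // The call inputs are the CEntry code target, the arguments, the runtime
+ // function reference, the argument count, and the context.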
+ Node* nodes[] = {centry, args..., ref, arity, context};
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1) {
CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+ Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
CallEpilogue();
return return_value;
}
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
- CallEpilogue();
- return return_value;
-}
+// Instantiate CallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
+ Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
- CallEpilogue();
- return return_value;
-}
+template <class... TArgs>
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
+ Node* context, TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, argc, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
- arg3, arg4, context);
- CallEpilogue();
- return return_value;
-}
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+ Node* arity = Int32Constant(argc);
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
- arg3, arg4, arg5, context);
- CallEpilogue();
- return return_value;
-}
+ Node* nodes[] = {centry, args..., ref, arity, context};
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- return raw_assembler_->TailCallRuntime0(function_id, context);
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
+// Instantiate TailCallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
+ Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2) {
- return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+template <class... TArgs>
+Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
+ size_t result_size, Node* target, Node* context,
+ TArgs... args) {
+ Node* nodes[] = {target, args..., context};
+ return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
- context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
- context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5) {
- return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
- arg5, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5,
- Node* arg6) {
- return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
- arg5, arg6, context);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, result_size);
-}
-
-Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStubN(callable.descriptor(), target, args, result_size);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(1);
- args[0] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 3;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 4;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3, const Arg& arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 5;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3, const Arg& arg4,
- const Arg& arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 6;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[arg5.index] = arg5.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
+// Instantiate CallStubR() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
+ const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
- int js_parameter_count, Node* target,
- Node** args, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor,
- descriptor.GetStackParameterCount() + js_parameter_count,
+ size_t result_size, int input_count,
+ Node* const* inputs) {
+ // Two of the inputs are the code target and the context, not arguments.
+ DCHECK_LE(2, input_count);
+ int argc = input_count - 2;
+ DCHECK_LE(descriptor.GetParameterCount(), argc);
+ // Extra arguments not mentioned in the descriptor are passed on the stack.
+ int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
+ DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, arg5, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, Node* arg6, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(7);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = arg6;
- args[6] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
+ CallPrologue();
+ Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+ CallEpilogue();
+ return return_value;
}
+template <class... TArgs>
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- const Arg& arg4, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ Node* target, Node* context, TArgs... args) {
+ DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+ size_t result_size = 1;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
- const int kArgsCount = 5;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+ Node* nodes[] = {target, args..., context};
- return raw_assembler_->TailCallN(call_descriptor, target, args);
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- const Arg& arg4, const Arg& arg5,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 6;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[arg5.index] = arg5.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
+// Instantiate TailCallStub() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
+ const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
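The variadic TailCallStub above is defined in the .cc file, so the patch pins down the arities other translation units can link against by instantiating the template explicitly for one to seven Node* arguments (the context plus up to six stub arguments). A minimal sketch of the same explicit-instantiation pattern, with illustrative names that are not part of V8:

    // sketch.h -- declaration only; the definition lives in sketch.cc.
    template <class... TArgs>
    int Sum(TArgs... args);

    // sketch.cc -- definition plus explicit instantiations; this is the
    // shape REPEAT_1_TO_7(INSTANTIATE, Node*) expands to above, one per arity.
    template <class... TArgs>
    int Sum(TArgs... args) { return (0 + ... + args); }  // C++17 fold
    template int Sum(int);
    template int Sum(int, int);
    template int Sum(int, int, int);  // ...and so on up to the maximum arity.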
+template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& interface_descriptor,
- Node* code_target_address, Node** args) {
- CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
- isolate(), zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount());
- return raw_assembler_->TailCallN(descriptor, code_target_address, args);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver,
- size_t result_size) {
- const int argc = 0;
- Node* target = HeapConstant(callable.code());
+ const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+ DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+ CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+ Node* nodes[] = {target, args...};
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- size_t result_size) {
- const int argc = 1;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- Node* arg2, size_t result_size) {
- const int argc = 2;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = arg2;
- args[5] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- const int argc = 3;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = arg2;
- args[5] = arg3;
- args[6] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
+// Instantiate TailCallBytecodeDispatch() with 4 arguments.
+template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
+ Node*, Node*);
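The four instantiated arguments line up with the interpreter's dispatch convention. A hypothetical call site, with the register-node names assumed from the usual InterpreterDispatchDescriptor layout rather than taken from this patch:

    // Tail-call the next bytecode handler from an interpreter assembler:
    TailCallBytecodeDispatch(
        InterpreterDispatchDescriptor(isolate()), next_handler_entry,
        accumulator, bytecode_offset, bytecode_array, dispatch_table);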
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
Node* arg0, Node* arg1) {
- return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
- function, arg0, arg1);
+ return raw_assembler()->CallCFunction2(return_type, arg0_type, arg1_type,
+ function, arg0, arg1);
+}
+
+Node* CodeAssembler::CallCFunction3(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type,
+ MachineType arg2_type, Node* function,
+ Node* arg0, Node* arg1, Node* arg2) {
+ return raw_assembler()->CallCFunction3(return_type, arg0_type, arg1_type,
+ arg2_type, function, arg0, arg1, arg2);
}
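A usage sketch for the new three-argument C-call overload; only the signature comes from this patch, the callee and operand nodes are illustrative:

    // Call int32_t Add3(int32_t, int32_t, int32_t) via C linkage:
    Node* result = CallCFunction3(
        MachineType::Int32(),                        // return type
        MachineType::Int32(), MachineType::Int32(),  // arg0, arg1 types
        MachineType::Int32(),                        // arg2 type
        add3_function, arg0, arg1, arg2);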
-void CodeAssembler::Goto(CodeAssembler::Label* label) {
+void CodeAssembler::Goto(Label* label) {
label->MergeVariables();
- raw_assembler_->Goto(label->label_);
+ raw_assembler()->Goto(label->label_);
}
void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
@@ -998,12 +636,12 @@ void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
Bind(&true_label);
}
-void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
- CodeAssembler::Label* false_label) {
+void CodeAssembler::Branch(Node* condition, Label* true_label,
+ Label* false_label) {
true_label->MergeVariables();
false_label->MergeVariables();
- return raw_assembler_->Branch(condition, true_label->label_,
- false_label->label_);
+ return raw_assembler()->Branch(condition, true_label->label_,
+ false_label->label_);
}
void CodeAssembler::Switch(Node* index, Label* default_label,
@@ -1017,75 +655,61 @@ void CodeAssembler::Switch(Node* index, Label* default_label,
case_labels[i]->MergeVariables();
default_label->MergeVariables();
}
- return raw_assembler_->Switch(index, default_label->label_, case_values,
- labels, case_count);
-}
-
-Node* CodeAssembler::Select(Node* condition, Node* true_value,
- Node* false_value, MachineRepresentation rep) {
- Variable value(this, rep);
- Label vtrue(this), vfalse(this), end(this);
- Branch(condition, &vtrue, &vfalse);
-
- Bind(&vtrue);
- {
- value.Bind(true_value);
- Goto(&end);
- }
- Bind(&vfalse);
- {
- value.Bind(false_value);
- Goto(&end);
- }
-
- Bind(&end);
- return value.value();
+ return raw_assembler()->Switch(index, default_label->label_, case_values,
+ labels, case_count);
}
// RawMachineAssembler delegate helpers:
-Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }
Factory* CodeAssembler::factory() const { return isolate()->factory(); }
-Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }
+
+RawMachineAssembler* CodeAssembler::raw_assembler() const {
+ return state_->raw_assembler_.get();
+}
// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
// needed to ensure that variable binding and merging through phis can
// properly be verified.
-class CodeAssembler::Variable::Impl : public ZoneObject {
+class CodeAssemblerVariable::Impl : public ZoneObject {
public:
explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
Node* value_;
MachineRepresentation rep_;
};
-CodeAssembler::Variable::Variable(CodeAssembler* assembler,
- MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
- assembler->variables_.insert(impl_);
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+ state_->variables_.insert(impl_);
}
-CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+CodeAssemblerVariable::~CodeAssemblerVariable() {
+ state_->variables_.erase(impl_);
+}
-void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
-Node* CodeAssembler::Variable::value() const {
+Node* CodeAssemblerVariable::value() const {
DCHECK_NOT_NULL(impl_->value_);
return impl_->value_;
}
-MachineRepresentation CodeAssembler::Variable::rep() const {
- return impl_->rep_;
-}
+MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }
-bool CodeAssembler::Variable::IsBound() const {
- return impl_->value_ != nullptr;
-}
+bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
-CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
- Variable** vars, CodeAssembler::Label::Type type)
- : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
+ size_t vars_count,
+ CodeAssemblerVariable** vars,
+ CodeAssemblerLabel::Type type)
+ : bound_(false),
+ merge_count_(0),
+ state_(assembler->state()),
+ label_(nullptr) {
void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
label_ = new (buffer)
RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
@@ -1095,9 +719,9 @@ CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
}
}
-void CodeAssembler::Label::MergeVariables() {
+void CodeAssemblerLabel::MergeVariables() {
++merge_count_;
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
size_t count = 0;
Node* node = var->value_;
if (node != nullptr) {
@@ -1122,7 +746,7 @@ void CodeAssembler::Label::MergeVariables() {
auto phi = variable_phis_.find(var);
if (phi != variable_phis_.end()) {
DCHECK_NOT_NULL(phi->second);
- assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+ state_->raw_assembler_->AppendPhiInput(phi->second, node);
} else {
auto i = variable_merges_.find(var);
if (i != variable_merges_.end()) {
@@ -1141,13 +765,13 @@ void CodeAssembler::Label::MergeVariables() {
}
}
-void CodeAssembler::Label::Bind() {
+void CodeAssemblerLabel::Bind() {
DCHECK(!bound_);
- assembler_->raw_assembler_->Bind(label_);
+ state_->raw_assembler_->Bind(label_);
// Make sure that all variables that have changed along any path up to this
// point are marked as merge variables.
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
Node* shared_value = nullptr;
auto i = variable_merges_.find(var);
if (i != variable_merges_.end()) {
@@ -1165,22 +789,23 @@ void CodeAssembler::Label::Bind() {
}
for (auto var : variable_phis_) {
- CodeAssembler::Variable::Impl* var_impl = var.first;
+ CodeAssemblerVariable::Impl* var_impl = var.first;
auto i = variable_merges_.find(var_impl);
- // If the following assert fires, then a variable that has been marked as
+ // If the following asserts fire, then a variable that has been marked as
// being merged at the label--either by explicitly marking it so in the
// label constructor or by having seen different bound values at branches
// into the label--doesn't have a bound value along all of the paths that
// have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
- Node* phi = assembler_->raw_assembler_->Phi(
+ DCHECK(i != variable_merges_.end());
+ DCHECK_EQ(i->second.size(), merge_count_);
+ Node* phi = state_->raw_assembler_->Phi(
var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
variable_phis_[var_impl] = phi;
}
// Bind all variables to a merge phi, the common value along all paths or
// null.
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
auto i = variable_phis_.find(var);
if (i != variable_phis_.end()) {
var->value_ = i->second;
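For orientation, this is the merging behavior the Label and Variable machinery above implements: binding the same variable to different values on two paths forces a phi when the merge label is bound. A CodeAssembler-style sketch (not a standalone program; |condition| is assumed):

    Variable var(this, MachineRepresentation::kWord32);
    Label if_true(this), if_false(this), merge(this, &var);
    Branch(condition, &if_true, &if_false);
    Bind(&if_true);
    var.Bind(Int32Constant(1));
    Goto(&merge);   // MergeVariables() records the first bound value.
    Bind(&if_false);
    var.Bind(Int32Constant(0));
    Goto(&merge);   // A second, different value for the same variable.
    Bind(&merge);   // Bind() emits Phi(1, 0); var.value() now yields it.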
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 1f364d99e3..25b1fab4a7 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
@@ -30,10 +31,17 @@ class Zone;
namespace compiler {
class CallDescriptor;
+class CodeAssemblerLabel;
+class CodeAssemblerVariable;
+class CodeAssemblerState;
class Node;
class RawMachineAssembler;
class RawMachineLabel;
+typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+
+typedef std::function<void()> CodeAssemblerCallback;
+
#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float32Equal) \
V(Float32LessThan) \
@@ -79,9 +87,7 @@ class RawMachineLabel;
V(Float64Pow) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
- V(IntPtrAdd) \
V(IntPtrAddWithOverflow) \
- V(IntPtrSub) \
V(IntPtrSubWithOverflow) \
V(IntPtrMul) \
V(Int32Add) \
@@ -157,6 +163,7 @@ class RawMachineLabel;
V(Float64RoundTiesEven) \
V(Float64RoundTruncate) \
V(Word32Clz) \
+ V(Word32Not) \
V(Word32BinaryNot)
// A "public" interface used by components outside of compiler directory to
@@ -175,22 +182,16 @@ class RawMachineLabel;
// clients, CodeAssembler also provides an abstraction for creating variables
// and enhanced Label functionality to merge variable values along paths where
// they have differing values, including loops.
+//
+// The CodeAssembler itself is stateless (and instances are expected to be
+// temporary-scoped and short-lived); all its state is encapsulated into
+// a CodeAssemblerState instance.
class V8_EXPORT_PRIVATE CodeAssembler {
public:
- // Create with CallStub linkage.
- // |result_size| specifies the number of results returned by the stub.
- // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
- CodeAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor, Code::Flags flags,
- const char* name, size_t result_size = 1);
-
- // Create with JSCall linkage.
- CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
+ explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
+ ~CodeAssembler();
- virtual ~CodeAssembler();
-
- Handle<Code> GenerateCode();
+ static Handle<Code> GenerateCode(CodeAssemblerState* state);
bool Is64() const;
bool IsFloat64RoundUpSupported() const;
@@ -198,24 +199,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsFloat64RoundTiesEvenSupported() const;
bool IsFloat64RoundTruncateSupported() const;
- class Label;
- class Variable {
- public:
- explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
- ~Variable();
- void Bind(Node* value);
- Node* value() const;
- MachineRepresentation rep() const;
- bool IsBound() const;
-
- private:
- friend class CodeAssembler;
- class Impl;
- Impl* impl_;
- CodeAssembler* assembler_;
- };
-
- typedef ZoneList<Variable*> VariableList;
+ // Shortened aliases for use in CodeAssembler subclasses.
+ typedef CodeAssemblerLabel Label;
+ typedef CodeAssemblerVariable Variable;
+ typedef CodeAssemblerVariableList VariableList;
// ===========================================================================
// Base Assembler
@@ -255,9 +242,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Switch(Node* index, Label* default_label, const int32_t* case_values,
Label** case_labels, size_t case_count);
- Node* Select(Node* condition, Node* true_value, Node* false_value,
- MachineRepresentation rep = MachineRepresentation::kTagged);
-
// Access to the frame pointer
Node* LoadFramePointer();
Node* LoadParentFramePointer();
@@ -267,19 +251,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
- Node* Load(MachineType rep, Node* base, Node* index);
- Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+ Node* Load(MachineType rep, Node* base, Node* offset);
+ Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Store value to raw memory location.
- Node* Store(MachineRepresentation rep, Node* base, Node* value);
- Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+ Node* Store(Node* base, Node* value);
+ Node* Store(Node* base, Node* offset, Node* value);
+ Node* StoreWithMapWriteBarrier(Node* base, Node* offset, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
// Store a value to the root array.
@@ -290,6 +275,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
+ Node* IntPtrAdd(Node* left, Node* right);
+ Node* IntPtrSub(Node* left, Node* right);
+
Node* WordShl(Node* value, int shift);
Node* WordShr(Node* value, int shift);
Node* Word32Shr(Node* value, int shift);
@@ -316,149 +304,76 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* Projection(int index, Node* value);
// Calls
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, Node* arg6);
-
- // A pair of a zero-based argument index and a value.
- // It helps writing arguments order independent code.
- struct Arg {
- Arg(int index, Node* value) : index(index), value(value) {}
-
- int const index;
- Node* const value;
- };
-
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
- Node* CallStubN(Callable const& callable, Node** args,
- size_t result_size = 1);
+ template <class... TArgs>
+ Node* CallRuntime(Runtime::FunctionId function, Node* context, TArgs... args);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size = 1);
+ template <class... TArgs>
+ Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
+ TArgs... args);
+ template <class... TArgs>
+ Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, args...);
+ }
+
+ template <class... TArgs>
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, const Arg& arg5,
- size_t result_size = 1);
-
- Node* CallStubN(const CallInterfaceDescriptor& descriptor,
- int js_parameter_count, Node* target, Node** args,
- size_t result_size = 1);
- Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
- Node** args, size_t result_size = 1) {
- return CallStubN(descriptor, 0, target, args, result_size);
+ Node* context, TArgs... args) {
+ return CallStubR(descriptor, 1, target, context, args...);
}
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5,
- size_t result_size = 1);
+ template <class... TArgs>
+ Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
+ Node* target, Node* context, TArgs... args);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2,
- size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* arg6,
- size_t result_size = 1);
+ Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
+ int input_count, Node* const* inputs);
+ template <class... TArgs>
+ Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, args...);
+ }
+
+ template <class... TArgs>
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, const Arg& arg5,
- size_t result_size = 1);
+ Node* context, TArgs... args);
+ template <class... TArgs>
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
- Node* code_target_address, Node** args);
+ Node* target, TArgs... args);
+ template <class... TArgs>
Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
+ Node* receiver, TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ Node* arity = Int32Constant(argc);
+ return CallStub(callable, context, function, arity, receiver, args...);
+ }
+
+ template <class... TArgs>
+ Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
+ TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ Node* arity = Int32Constant(argc);
+ Node* receiver = LoadRoot(Heap::kUndefinedValueRootIndex);
+
+ // Construct(target, new_target, arity, receiver, arguments...)
+ return CallStub(callable, context, new_target, new_target, arity, receiver,
+ args...);
+ }
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
Node* arg1);
+ // Call to a C function with three arguments.
+ Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2);
+
// Exception handling support.
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
@@ -468,45 +383,68 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Isolate* isolate() const;
Zone* zone() const;
+ CodeAssemblerState* state() { return state_; }
+
+ void BreakOnNode(int node_id);
+
protected:
- // Enables subclasses to perform operations before and after a call.
- virtual void CallPrologue();
- virtual void CallEpilogue();
+ void RegisterCallGenerationCallbacks(
+ const CodeAssemblerCallback& call_prologue,
+ const CodeAssemblerCallback& call_epilogue);
+ void UnregisterCallGenerationCallbacks();
private:
- CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
- Code::Flags flags, const char* name);
+ RawMachineAssembler* raw_assembler() const;
- Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
- Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ // Calls respective callback registered in the state.
+ void CallPrologue();
+ void CallEpilogue();
- std::unique_ptr<RawMachineAssembler> raw_assembler_;
- Code::Flags flags_;
- const char* name_;
- bool code_generated_;
- ZoneSet<Variable::Impl*> variables_;
+ CodeAssemblerState* state_;
DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
-class CodeAssembler::Label {
+class CodeAssemblerVariable {
+ public:
+ explicit CodeAssemblerVariable(CodeAssembler* assembler,
+ MachineRepresentation rep);
+ ~CodeAssemblerVariable();
+ void Bind(Node* value);
+ Node* value() const;
+ MachineRepresentation rep() const;
+ bool IsBound() const;
+
+ private:
+ friend class CodeAssemblerLabel;
+ friend class CodeAssemblerState;
+ class Impl;
+ Impl* impl_;
+ CodeAssemblerState* state_;
+};
+
+class CodeAssemblerLabel {
public:
enum Type { kDeferred, kNonDeferred };
- explicit Label(
+ explicit CodeAssemblerLabel(
+ CodeAssembler* assembler,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, 0, nullptr, type) {}
+ CodeAssemblerLabel(
CodeAssembler* assembler,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : CodeAssembler::Label(assembler, 0, nullptr, type) {}
- Label(CodeAssembler* assembler, const VariableList& merged_variables,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : CodeAssembler::Label(assembler, merged_variables.length(),
- &(merged_variables[0]), type) {}
- Label(CodeAssembler* assembler, size_t count, Variable** vars,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
- Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : Label(assembler, 1, &merged_variable, type) {}
- ~Label() {}
+ const CodeAssemblerVariableList& merged_variables,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, merged_variables.length(),
+ &(merged_variables[0]), type) {}
+ CodeAssemblerLabel(
+ CodeAssembler* assembler, size_t count, CodeAssemblerVariable** vars,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred);
+ CodeAssemblerLabel(
+ CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
+ ~CodeAssemblerLabel() {}
private:
friend class CodeAssembler;
@@ -516,14 +454,53 @@ class CodeAssembler::Label {
bool bound_;
size_t merge_count_;
- CodeAssembler* assembler_;
+ CodeAssemblerState* state_;
RawMachineLabel* label_;
// Map of variables that need to be merged to their phi nodes (or placeholders
// for those phis).
- std::map<Variable::Impl*, Node*> variable_phis_;
+ std::map<CodeAssemblerVariable::Impl*, Node*> variable_phis_;
// Map of variables to the list of value nodes that have been added from each
// merge path in their order of merging.
- std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+ std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+class V8_EXPORT_PRIVATE CodeAssemblerState {
+ public:
+ // Create with CallStub linkage.
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+ CodeAssemblerState(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
+ ~CodeAssemblerState();
+
+ const char* name() const { return name_; }
+ int parameter_count() const;
+
+ private:
+ friend class CodeAssembler;
+ friend class CodeAssemblerLabel;
+ friend class CodeAssemblerVariable;
+
+ CodeAssemblerState(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor, Code::Flags flags,
+ const char* name);
+
+ std::unique_ptr<RawMachineAssembler> raw_assembler_;
+ Code::Flags flags_;
+ const char* name_;
+ bool code_generated_;
+ ZoneSet<CodeAssemblerVariable::Impl*> variables_;
+ CodeAssemblerCallback call_prologue_;
+ CodeAssemblerCallback call_epilogue_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
} // namespace compiler
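Taken together, the header changes make code generation a two-phase flow: accumulate everything in a CodeAssemblerState, drive it through one or more short-lived CodeAssembler instances, then generate code from the state. A sketch with illustrative flag and name values:

    CodeAssemblerState state(isolate, zone, /*parameter_count=*/2,
                             Code::ComputeFlags(Code::STUB), "MyStub");
    {
      CodeAssembler assembler(&state);
      // ... emit the graph through |assembler| ...
    }  // The assembler can die here; the accumulated state survives.
    Handle<Code> code = CodeAssembler::GenerateCode(&state);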
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index c69e86e0a5..7863476871 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -33,8 +33,10 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info)
+CodeGenerator::CodeGenerator(
+ Frame* frame, Linkage* linkage, InstructionSequence* code,
+ CompilationInfo* info,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions)
: frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
@@ -56,8 +58,10 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
jump_tables_(nullptr),
ools_(nullptr),
osr_pc_offset_(-1),
+ optimized_out_literal_id_(-1),
source_position_table_builder_(code->zone(),
- info->SourcePositionRecordingMode()) {
+ info->SourcePositionRecordingMode()),
+ protected_instructions_(protected_instructions) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -71,6 +75,15 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
+void CodeGenerator::AddProtectedInstruction(int instr_offset,
+ int landing_offset) {
+ if (protected_instructions_ != nullptr) {
+ trap_handler::ProtectedInstructionData data = {instr_offset,
+ landing_offset};
+ protected_instructions_->emplace_back(data);
+ }
+}
+
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
@@ -79,6 +92,11 @@ Handle<Code> CodeGenerator::GenerateCode() {
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(masm(), StackFrame::MANUAL);
+ if (info->is_source_positions_enabled()) {
+ SourcePosition source_position(info->shared_info()->start_position());
+ AssembleSourcePosition(source_position);
+ }
+
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -392,6 +410,10 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
Instruction* instr, const InstructionBlock* block) {
int first_unused_stack_slot;
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode != kFlags_trap) {
+ AssembleSourcePosition(instr);
+ }
bool adjust_stack =
GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
@@ -404,12 +426,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
if (instr->IsJump() && block->must_deconstruct_frame()) {
AssembleDeconstructFrame();
}
- AssembleSourcePosition(instr);
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
case kFlags_branch: {
@@ -461,6 +481,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBoolean(instr, condition);
break;
}
+ case kFlags_trap: {
+ AssembleArchTrap(instr, condition);
+ break;
+ }
case kFlags_none: {
break;
}
@@ -468,10 +492,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
return kSuccess;
}
-
void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
SourcePosition source_position = SourcePosition::Unknown();
+ if (instr->IsNop() && instr->AreMovesRedundant()) return;
if (!code()->GetSourcePosition(instr, &source_position)) return;
+ AssembleSourcePosition(source_position);
+}
+
+void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
@@ -481,7 +509,13 @@ void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
CompilationInfo* info = this->info();
if (!info->parse_info()) return;
std::ostringstream buffer;
- buffer << "-- " << source_position.InliningStack(info) << " --";
+ buffer << "-- ";
+ if (FLAG_trace_turbo) {
+ buffer << source_position;
+ } else {
+ buffer << source_position.InliningStack(info);
+ }
+ buffer << " --";
masm()->RecordComment(StrDup(buffer.str().c_str()));
}
}
@@ -628,15 +662,6 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
OutputFrameStateCombine::Ignore());
}
-#if DEBUG
- // Make sure all the values live in stack slots or they are immediates.
- // (The values should not live in register because registers are clobbered
- // by calls.)
- for (size_t i = 0; i < descriptor->GetSize(); i++) {
- InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
- CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
- }
-#endif
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
}
}
@@ -666,19 +691,37 @@ DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
}
void CodeGenerator::TranslateStateValueDescriptor(
- StateValueDescriptor* desc, Translation* translation,
- InstructionOperandIterator* iter) {
+ StateValueDescriptor* desc, StateValueList* nested,
+ Translation* translation, InstructionOperandIterator* iter) {
+ // Note:
+ // If translation is null, we just skip the relevant instruction operands.
if (desc->IsNested()) {
- translation->BeginCapturedObject(static_cast<int>(desc->size()));
- for (size_t index = 0; index < desc->fields().size(); index++) {
- TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+ if (translation != nullptr) {
+ translation->BeginCapturedObject(static_cast<int>(nested->size()));
+ }
+ for (auto field : *nested) {
+ TranslateStateValueDescriptor(field.desc, field.nested, translation,
+ iter);
}
} else if (desc->IsDuplicate()) {
- translation->DuplicateObject(static_cast<int>(desc->id()));
+ if (translation != nullptr) {
+ translation->DuplicateObject(static_cast<int>(desc->id()));
+ }
+ } else if (desc->IsPlain()) {
+ InstructionOperand* op = iter->Advance();
+ if (translation != nullptr) {
+ AddTranslationForOperand(translation, iter->instruction(), op,
+ desc->type());
+ }
} else {
- DCHECK(desc->IsPlain());
- AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
- desc->type());
+ DCHECK(desc->IsOptimizedOut());
+ if (translation != nullptr) {
+ if (optimized_out_literal_id_ == -1) {
+ optimized_out_literal_id_ =
+ DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+ }
+ translation->StoreLiteral(optimized_out_literal_id_);
+ }
}
}
@@ -686,44 +729,41 @@ void CodeGenerator::TranslateStateValueDescriptor(
void CodeGenerator::TranslateFrameStateDescriptorOperands(
FrameStateDescriptor* desc, InstructionOperandIterator* iter,
OutputFrameStateCombine combine, Translation* translation) {
- for (size_t index = 0; index < desc->GetSize(combine); index++) {
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput: {
- DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
- size_t size_without_output =
- desc->GetSize(OutputFrameStateCombine::Ignore());
- // If the index is past the existing stack items in values_.
- if (index >= size_without_output) {
- // Materialize the result of the call instruction in this slot.
- AddTranslationForOperand(
- translation, iter->instruction(),
- iter->instruction()->OutputAt(index - size_without_output),
- MachineType::AnyTagged());
- continue;
- }
- break;
+ size_t index = 0;
+ StateValueList* values = desc->GetStateValueDescriptors();
+ for (StateValueList::iterator it = values->begin(); it != values->end();
+ ++it, ++index) {
+ StateValueDescriptor* value_desc = (*it).desc;
+ if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+ // The result of the call should be placed at position
+ // [index_from_top] in the stack (overwriting whatever was
+ // previously there).
+ size_t index_from_top =
+ desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ if (index >= index_from_top &&
+ index < index_from_top + iter->instruction()->OutputCount()) {
+ DCHECK_NOT_NULL(translation);
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - index_from_top),
+ MachineType::AnyTagged());
+ // Skip the instruction operands.
+ TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
+ continue;
}
- case OutputFrameStateCombine::kPokeAt:
- // The result of the call should be placed at position
- // [index_from_top] in the stack (overwriting whatever was
- // previously there).
- size_t index_from_top =
- desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + iter->instruction()->OutputCount()) {
- AddTranslationForOperand(
- translation, iter->instruction(),
- iter->instruction()->OutputAt(index - index_from_top),
- MachineType::AnyTagged());
- iter->Advance(); // We do not use this input, but we need to
-                            // advance, as the input got replaced.
- continue;
- }
- break;
}
- StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
- TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
- iter);
+ TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
+ }
+ DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
+
+ if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
+ DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+ for (size_t output = 0; output < combine.GetPushCount(); output++) {
+ // Materialize the result of the call instruction in this slot.
+ AddTranslationForOperand(translation, iter->instruction(),
+ iter->instruction()->OutputAt(output),
+ MachineType::AnyTagged());
+ }
}
}
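A worked example of the kPokeAt indexing in TranslateFrameStateDescriptorOperands, with illustrative numbers:

    // With desc->GetSize(combine) == 5 and combine.GetOffsetToPokeAt() == 1:
    size_t frame_state_size = 5;
    size_t offset_to_poke_at = 1;
    size_t index_from_top = frame_state_size - 1 - offset_to_poke_at;  // == 3
    // A call with one output overwrites state value index 3 only; every
    // other index falls through to TranslateStateValueDescriptor unchanged.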
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 7aed85a37f..e20a8be774 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -12,6 +12,7 @@
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
#include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
@@ -52,7 +53,9 @@ class InstructionOperandIterator {
class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info);
+ InstructionSequence* code, CompilationInfo* info,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions = nullptr);
// Generate native code.
Handle<Code> GenerateCode();
@@ -65,6 +68,16 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
+ void AddProtectedInstruction(int instr_offset, int landing_offset);
+
+ void AssembleSourcePosition(Instruction* instr);
+
+ void AssembleSourcePosition(SourcePosition source_position);
+
+ // Record a safepoint with the given pointer map.
+ void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode);
+
private:
MacroAssembler* masm() { return &masm_; }
GapResolver* resolver() { return &resolver_; }
@@ -82,10 +95,6 @@ class CodeGenerator final : public GapResolver::Assembler {
// assembling code, in which case, a fall-through can be used.
bool IsNextInAssemblyOrder(RpoNumber block) const;
- // Record a safepoint with the given pointer map.
- void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode deopt_mode);
-
// Check if a heap object can be materialized by loading from a heap root,
// which is cheaper on some platforms than materializing the actual heap
// object constant.
@@ -100,7 +109,6 @@ class CodeGenerator final : public GapResolver::Assembler {
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(Instruction* instr,
const InstructionBlock* block);
- void AssembleSourcePosition(Instruction* instr);
void AssembleGaps(Instruction* instr);
  // Returns true if an instruction is a tail call that needs to adjust the stack
@@ -116,6 +124,7 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
@@ -213,6 +222,7 @@ class CodeGenerator final : public GapResolver::Assembler {
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
Translation* translation, OutputFrameStateCombine state_combine);
void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+ StateValueList* nested,
Translation* translation,
InstructionOperandIterator* iter);
void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
@@ -279,7 +289,9 @@ class CodeGenerator final : public GapResolver::Assembler {
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
+ int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_;
};
} // namespace compiler
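The protected-instruction plumbing pairs each potentially faulting memory access with its trap landing pad. A minimal standalone analog, with field names assumed from the initializer order in AddProtectedInstruction:

    #include <vector>

    struct ProtectedInstructionData {
      int instr_offset;    // offset of the access that may fault
      int landing_offset;  // offset the trap handler should resume at
    };

    int main() {
      std::vector<ProtectedInstructionData> protected_instructions;
      protected_instructions.push_back({/*instr_offset=*/0x40,
                                        /*landing_offset=*/0x80});
    }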
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 9a368162ef..85d49b7ae6 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -36,7 +36,6 @@ Decision DecideCondition(Node* const cond) {
} // namespace
-
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine)
@@ -44,8 +43,9 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
graph_(graph),
common_(common),
machine_(machine),
- dead_(graph->NewNode(common->Dead())) {}
-
+ dead_(graph->NewNode(common->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
@@ -195,15 +195,16 @@ Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
- int const input_count = node->InputCount() - 1;
- DCHECK_LE(1, input_count);
- Node* const merge = node->InputAt(input_count);
+ Node::Inputs inputs = node->inputs();
+ int const effect_input_count = inputs.count() - 1;
+ DCHECK_LE(1, effect_input_count);
+ Node* const merge = inputs[effect_input_count];
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
- DCHECK_EQ(input_count, merge->InputCount());
- Node* const effect = node->InputAt(0);
+ DCHECK_EQ(effect_input_count, merge->InputCount());
+ Node* const effect = inputs[0];
DCHECK_NE(node, effect);
- for (int i = 1; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 1; i < effect_input_count; ++i) {
+ Node* const input = inputs[i];
if (input == node) {
// Ignore redundant inputs.
DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -219,16 +220,18 @@ Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
Reduction CommonOperatorReducer::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
- int const input_count = node->InputCount() - 1;
- DCHECK_LE(1, input_count);
- Node* const merge = node->InputAt(input_count);
+ Node::Inputs inputs = node->inputs();
+ int const value_input_count = inputs.count() - 1;
+ DCHECK_LE(1, value_input_count);
+ Node* const merge = inputs[value_input_count];
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
- DCHECK_EQ(input_count, merge->InputCount());
- if (input_count == 2) {
- Node* vtrue = node->InputAt(0);
- Node* vfalse = node->InputAt(1);
- Node* if_true = merge->InputAt(0);
- Node* if_false = merge->InputAt(1);
+ DCHECK_EQ(value_input_count, merge->InputCount());
+ if (value_input_count == 2) {
+ Node* vtrue = inputs[0];
+ Node* vfalse = inputs[1];
+ Node::Inputs merge_inputs = merge->inputs();
+ Node* if_true = merge_inputs[0];
+ Node* if_false = merge_inputs[1];
if (if_true->opcode() != IrOpcode::kIfTrue) {
std::swap(if_true, if_false);
std::swap(vtrue, vfalse);
@@ -265,10 +268,10 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
}
}
}
- Node* const value = node->InputAt(0);
+ Node* const value = inputs[0];
DCHECK_NE(node, value);
- for (int i = 1; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 1; i < value_input_count; ++i) {
+ Node* const input = inputs[i];
if (input == node) {
// Ignore redundant inputs.
DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -284,7 +287,6 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
- Node* const value = node->InputAt(1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
bool changed = false;
@@ -295,25 +297,32 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
NodeProperties::ReplaceEffectInput(node, effect);
changed = true;
}
+ // TODO(ahaas): Extend the reduction below to multiple return values.
+ if (ValueInputCountOfReturn(node->op()) != 1) {
+ return NoChange();
+ }
+ Node* const value = node->InputAt(1);
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control &&
effect->opcode() == IrOpcode::kEffectPhi &&
NodeProperties::GetControlInput(effect) == control &&
control->opcode() == IrOpcode::kMerge) {
- int const control_input_count = control->InputCount();
- DCHECK_NE(0, control_input_count);
- DCHECK_EQ(control_input_count, value->InputCount() - 1);
- DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+ Node::Inputs control_inputs = control->inputs();
+ Node::Inputs value_inputs = value->inputs();
+ Node::Inputs effect_inputs = effect->inputs();
+ DCHECK_NE(0, control_inputs.count());
+ DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
+ DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- for (int i = 0; i < control_input_count; ++i) {
+ for (int i = 0; i < control_inputs.count(); ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
// previously connected to {end}, so we know for sure that at some point
// the reducer logic will visit {end} again.
Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
- value->InputAt(i), effect->InputAt(i),
- control->InputAt(i));
+ value_inputs[i], effect_inputs[i],
+ control_inputs[i]);
NodeProperties::MergeControlToEnd(graph(), common(), ret);
}
// Mark the merge {control} and return {node} as {dead}.
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 9ce6f71a0f..2cd63314cf 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -7,9 +7,11 @@
#include "src/assembler.h"
#include "src/base/lazy-instance.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -41,6 +43,13 @@ DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
return OpParameter<DeoptimizeReason>(op);
}
+int ValueInputCountOfReturn(Operator const* const op) {
+ DCHECK(op->opcode() == IrOpcode::kReturn);
+ // Return nodes have a hidden input at index 0 which we ignore in the value
+ // input count.
+ return op->ValueInputCount() - 1;
+}
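For reference, the hidden input is the pop count. The layout this helper assumes, stated as comments since it only restates the surrounding code:

    // Return node inputs (single-return case):
    //   0: pop count (hidden; excluded from the value input count here)
    //   1: the returned value (node->InputAt(1) in ReduceReturn,
    //      common-operator-reducer.cc)
    //   followed by the effect and control inputs.
    // So a one-value Return has op->ValueInputCount() == 2, and this
    // helper yields 1.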
+
size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
@@ -171,6 +180,106 @@ std::ostream& operator<<(std::ostream& os,
return os << p.value() << "|" << p.rmode() << "|" << p.type();
}
+SparseInputMask::InputIterator::InputIterator(
+ SparseInputMask::BitMaskType bit_mask, Node* parent)
+ : bit_mask_(bit_mask), parent_(parent), real_index_(0) {
+#if DEBUG
+ if (bit_mask_ != SparseInputMask::kDenseBitMask) {
+ DCHECK_EQ(base::bits::CountPopulation(bit_mask_) -
+ base::bits::CountPopulation(kEndMarker),
+ parent->InputCount());
+ }
+#endif
+}
+
+void SparseInputMask::InputIterator::Advance() {
+ DCHECK(!IsEnd());
+
+ if (IsReal()) {
+ ++real_index_;
+ }
+ bit_mask_ >>= 1;
+}
+
+Node* SparseInputMask::InputIterator::GetReal() const {
+ DCHECK(IsReal());
+ return parent_->InputAt(real_index_);
+}
+
+bool SparseInputMask::InputIterator::IsReal() const {
+ return bit_mask_ == SparseInputMask::kDenseBitMask ||
+ (bit_mask_ & kEntryMask);
+}
+
+bool SparseInputMask::InputIterator::IsEnd() const {
+ return (bit_mask_ == kEndMarker) ||
+ (bit_mask_ == SparseInputMask::kDenseBitMask &&
+ real_index_ >= parent_->InputCount());
+}
+
+int SparseInputMask::CountReal() const {
+ DCHECK(!IsDense());
+ return base::bits::CountPopulation(bit_mask_) -
+ base::bits::CountPopulation(kEndMarker);
+}
+
+SparseInputMask::InputIterator SparseInputMask::IterateOverInputs(Node* node) {
+ DCHECK(IsDense() || CountReal() == node->InputCount());
+ return InputIterator(bit_mask_, node);
+}
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+ return lhs.mask() == rhs.mask();
+}
+
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(SparseInputMask const& p) {
+ return base::hash_value(p.mask());
+}
+
+std::ostream& operator<<(std::ostream& os, SparseInputMask const& p) {
+ if (p.IsDense()) {
+ return os << "dense";
+ } else {
+ SparseInputMask::BitMaskType mask = p.mask();
+ DCHECK_NE(mask, SparseInputMask::kDenseBitMask);
+
+ os << "sparse:";
+
+ while (mask != SparseInputMask::kEndMarker) {
+ if (mask & SparseInputMask::kEntryMask) {
+ os << "^";
+ } else {
+ os << ".";
+ }
+ mask >>= 1;
+ }
+ return os;
+ }
+}
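A runnable miniature of the mask walk InputIterator implements; it assumes kEndMarker == 1 and kEntryMask == 1, which the popcount arithmetic in CountReal() implies:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t mask = 0b1011;  // LSB first: real, real, optimized-out; end bit
      int real_index = 0;
      while (mask != 0b1) {    // kEndMarker: only the guard bit remains
        if (mask & 1) {        // kEntryMask: this entry is a real input
          std::printf("real input #%d\n", real_index++);
        } else {
          std::printf("optimized-out slot\n");
        }
        mask >>= 1;
      }
      // CountReal(): popcount(0b1011) - popcount(0b1) == 3 - 1 == 2.
      return 0;
    }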
+
+bool operator==(TypedStateValueInfo const& lhs,
+ TypedStateValueInfo const& rhs) {
+ return lhs.machine_types() == rhs.machine_types() &&
+ lhs.sparse_input_mask() == rhs.sparse_input_mask();
+}
+
+bool operator!=(TypedStateValueInfo const& lhs,
+ TypedStateValueInfo const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(TypedStateValueInfo const& p) {
+ return base::hash_combine(p.machine_types(), p.sparse_input_mask());
+}
+
+std::ostream& operator<<(std::ostream& os, TypedStateValueInfo const& p) {
+ return os << p.machine_types() << "|" << p.sparse_input_mask();
+}
+
size_t hash_value(RegionObservability observability) {
return static_cast<size_t>(observability);
}
@@ -235,9 +344,23 @@ OsrGuardType OsrGuardTypeOf(Operator const* op) {
return OpParameter<OsrGuardType>(op);
}
+SparseInputMask SparseInputMaskOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kStateValues ||
+ op->opcode() == IrOpcode::kTypedStateValues);
+
+ if (op->opcode() == IrOpcode::kTypedStateValues) {
+ return OpParameter<TypedStateValueInfo>(op).sparse_input_mask();
+ }
+ return OpParameter<SparseInputMask>(op);
+}
+
ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
op->opcode() == IrOpcode::kTypedStateValues);
+
+ if (op->opcode() == IrOpcode::kTypedStateValues) {
+ return OpParameter<TypedStateValueInfo>(op).machine_types();
+ }
return OpParameter<const ZoneVector<MachineType>*>(op);
}
@@ -330,6 +453,21 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(WrongInstanceType) \
V(WrongMap)
+#define CACHED_TRAP_IF_LIST(V) \
+ V(TrapDivUnrepresentable) \
+ V(TrapFloatUnrepresentable)
+
+// The trap reasons that have cached TrapUnless operators.
+#define CACHED_TRAP_UNLESS_LIST(V) \
+ V(TrapUnreachable) \
+ V(TrapMemOutOfBounds) \
+ V(TrapDivByZero) \
+ V(TrapDivUnrepresentable) \
+ V(TrapRemByZero) \
+ V(TrapFloatUnrepresentable) \
+ V(TrapFuncInvalid) \
+ V(TrapFuncSigMismatch)
+
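+// Each V(...) entry above expands (via the CACHED_TRAP_IF / CACHED_TRAP_UNLESS
+// macros below) into a cached singleton, e.g. V(TrapDivByZero) in the list
+// above becomes kTrapUnlessTrapDivByZeroOperator with trap_id
+// Runtime::kThrowWasmTrapDivByZero.
+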
#define CACHED_PARAMETER_LIST(V) \
V(0) \
V(1) \
@@ -529,6 +667,38 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
+ template <int32_t trap_id>
+ struct TrapIfOperator final : public Operator1<int32_t> {
+ TrapIfOperator()
+ : Operator1<int32_t>( // --
+ IrOpcode::kTrapIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapIf", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id) {} // parameter
+ };
+#define CACHED_TRAP_IF(Trap) \
+ TrapIfOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+ kTrapIf##Trap##Operator;
+ CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+
+ template <int32_t trap_id>
+ struct TrapUnlessOperator final : public Operator1<int32_t> {
+ TrapUnlessOperator()
+ : Operator1<int32_t>( // --
+ IrOpcode::kTrapUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapUnless", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id) {} // parameter
+ };
+#define CACHED_TRAP_UNLESS(Trap) \
+ TrapUnlessOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+ kTrapUnless##Trap##Operator;
+ CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+
template <MachineRepresentation kRep, int kInputCount>
struct PhiOperator final : public Operator1<MachineRepresentation> {
PhiOperator()
@@ -588,13 +758,14 @@ struct CommonOperatorGlobalCache final {
#undef CACHED_PROJECTION
template <int kInputCount>
- struct StateValuesOperator final : public Operator {
+ struct StateValuesOperator final : public Operator1<SparseInputMask> {
StateValuesOperator()
- : Operator( // --
- IrOpcode::kStateValues, // opcode
- Operator::kPure, // flags
- "StateValues", // name
- kInputCount, 0, 0, 1, 0, 0) {} // counts
+ : Operator1<SparseInputMask>( // --
+ IrOpcode::kStateValues, // opcode
+ Operator::kPure, // flags
+ "StateValues", // name
+ kInputCount, 0, 0, 1, 0, 0, // counts
+ SparseInputMask::Dense()) {} // parameter
};
#define CACHED_STATE_VALUES(input_count) \
StateValuesOperator<input_count> kStateValues##input_count##Operator;
@@ -727,6 +898,43 @@ const Operator* CommonOperatorBuilder::DeoptimizeUnless(
reason); // parameter
}
+const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
+ switch (trap_id) {
+#define CACHED_TRAP_IF(Trap) \
+ case Runtime::kThrowWasm##Trap: \
+ return &cache_.kTrapIf##Trap##Operator;
+ CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+ default:
+ break;
+ }
+  // Uncached.
+  return new (zone()) Operator1<int32_t>(      // --
+ IrOpcode::kTrapIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapIf", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id); // parameter
+}
+
+const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
+ switch (trap_id) {
+#define CACHED_TRAP_UNLESS(Trap) \
+ case Runtime::kThrowWasm##Trap: \
+ return &cache_.kTrapUnless##Trap##Operator;
+ CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+ default:
+ break;
+ }
+  // Uncached.
+  return new (zone()) Operator1<int32_t>(      // --
+ IrOpcode::kTrapUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapUnless", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id); // parameter
+}
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
return new (zone()) Operator( // --
@@ -1000,30 +1208,44 @@ const Operator* CommonOperatorBuilder::BeginRegion(
return nullptr;
}
-const Operator* CommonOperatorBuilder::StateValues(int arguments) {
- switch (arguments) {
+const Operator* CommonOperatorBuilder::StateValues(int arguments,
+ SparseInputMask bitmask) {
+ if (bitmask.IsDense()) {
+ switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
case arguments: \
return &cache_.kStateValues##arguments##Operator;
- CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
#undef CACHED_STATE_VALUES
- default:
- break;
+ default:
+ break;
+ }
}
+
+  DCHECK(bitmask.IsDense() || bitmask.CountReal() == arguments);
+
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kStateValues, Operator::kPure, // opcode
- "StateValues", // name
- arguments, 0, 0, 1, 0, 0); // counts
+ return new (zone()) Operator1<SparseInputMask>( // --
+ IrOpcode::kStateValues, Operator::kPure, // opcode
+ "StateValues", // name
+ arguments, 0, 0, 1, 0, 0, // counts
+ bitmask); // parameter
}
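+// E.g. (an illustrative call): common()->StateValues(2, SparseInputMask(0x0B))
+// describes three sparse slots [real, real, empty] (mask 0b1011, bottom bit
+// first, top bit the end marker), so the operator takes two real inputs.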
const Operator* CommonOperatorBuilder::TypedStateValues(
- const ZoneVector<MachineType>* types) {
- return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
- IrOpcode::kTypedStateValues, Operator::kPure, // opcode
- "TypedStateValues", // name
- static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
- types); // parameter
+ const ZoneVector<MachineType>* types, SparseInputMask bitmask) {
+  DCHECK(bitmask.IsDense() ||
+         bitmask.CountReal() == static_cast<int>(types->size()));
+
+ return new (zone()) Operator1<TypedStateValueInfo>( // --
+ IrOpcode::kTypedStateValues, Operator::kPure, // opcode
+ "TypedStateValues", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
+ TypedStateValueInfo(types, bitmask)); // parameters
}
const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
@@ -1131,6 +1353,43 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
}
}
+const Operator* CommonOperatorBuilder::Int32x4ExtractLane(int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kInt32x4ExtractLane, Operator::kPure, // opcode
+ "Int32x4ExtractLane", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Int32x4ReplaceLane(int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kInt32x4ReplaceLane, Operator::kPure, // opcode
+ "Int32x4ReplaceLane", // name
+ 2, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Float32x4ExtractLane(
+ int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kFloat32x4ExtractLane, Operator::kPure, // opcode
+ "Float32x4ExtractLane", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Float32x4ReplaceLane(
+ int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kFloat32x4ReplaceLane, Operator::kPure, // opcode
+ "Float32x4ReplaceLane", // name
+ 2, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
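+// E.g. common()->Int32x4ExtractLane(3) selects the last of the four lanes;
+// the DCHECKs above reject lane numbers outside [0, 4).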
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateFrameStateFunctionInfo(
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 1f258a0ec0..5d0a6df31d 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -22,6 +22,7 @@ class CallDescriptor;
struct CommonOperatorGlobalCache;
class Operator;
class Type;
+class Node;
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
@@ -48,6 +49,9 @@ V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
+// Helper function for return nodes, because returns have a hidden value input.
+int ValueInputCountOfReturn(Operator const* const op);
+
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft };
@@ -158,6 +162,123 @@ std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
size_t hash_value(RelocatablePtrConstantInfo const& p);
+// Used to define a sparse set of inputs. This efficiently encodes nodes that
+// may have many inputs, where many of those inputs share the same value.
+class SparseInputMask final {
+ public:
+ typedef uint32_t BitMaskType;
+
+ // The mask representing a dense input set.
+ static const BitMaskType kDenseBitMask = 0x0;
+ // The bits representing the end of a sparse input set.
+ static const BitMaskType kEndMarker = 0x1;
+ // The mask for accessing a sparse input entry in the bitmask.
+ static const BitMaskType kEntryMask = 0x1;
+
+ // The number of bits in the mask, minus one for the end marker.
+ static const int kMaxSparseInputs = (sizeof(BitMaskType) * kBitsPerByte - 1);
+
+ // An iterator over a node's sparse inputs.
+ class InputIterator final {
+ public:
+ InputIterator() {}
+ InputIterator(BitMaskType bit_mask, Node* parent);
+
+ Node* parent() const { return parent_; }
+ int real_index() const { return real_index_; }
+
+ // Advance the iterator to the next sparse input. Only valid if the iterator
+ // has not reached the end.
+ void Advance();
+
+ // Get the current sparse input's real node value. Only valid if the
+ // current sparse input is real.
+ Node* GetReal() const;
+
+ // Get the current sparse input, returning either a real input node if
+ // the current sparse input is real, or the given {empty_value} if the
+ // current sparse input is empty.
+ Node* Get(Node* empty_value) const {
+ return IsReal() ? GetReal() : empty_value;
+ }
+
+ // True if the current sparse input is a real input node.
+ bool IsReal() const;
+
+ // True if the current sparse input is an empty value.
+ bool IsEmpty() const { return !IsReal(); }
+
+ // True if the iterator has reached the end of the sparse inputs.
+ bool IsEnd() const;
+
+ private:
+ BitMaskType bit_mask_;
+ Node* parent_;
+ int real_index_;
+ };
+
+ explicit SparseInputMask(BitMaskType bit_mask) : bit_mask_(bit_mask) {}
+
+ // Provides a SparseInputMask representing a dense input set.
+ static SparseInputMask Dense() { return SparseInputMask(kDenseBitMask); }
+
+ BitMaskType mask() const { return bit_mask_; }
+
+ bool IsDense() const { return bit_mask_ == SparseInputMask::kDenseBitMask; }
+
+ // Counts how many real values are in the sparse array. Only valid for
+ // non-dense masks.
+ int CountReal() const;
+
+ // Returns an iterator over the sparse inputs of {node}.
+ InputIterator IterateOverInputs(Node* node);
+
+ private:
+ //
+ // The sparse input mask has a bitmask specifying if the node's inputs are
+ // represented sparsely. If the bitmask value is 0, then the inputs are dense;
+ // otherwise, they should be interpreted as follows:
+ //
+ // * The bitmask represents which values are real, with 1 for real values
+ // and 0 for empty values.
+ // * The inputs to the node are the real values, in the order of the 1s from
+ // least- to most-significant.
+  //  * The top bit of the bitmask is a guard marking the end of the values,
+  //    whether real or empty (it does not itself represent a real input).
+  //    This guard lets us avoid storing a separate value count.
+ //
+ // So, for N 1s in the bitmask, there are N - 1 inputs into the node.
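+  //
+  // For example (illustrative), the mask 0b1011 decodes, bottom bit first,
+  // as [real, real, empty], with the final 1 acting as the end marker: three
+  // 1s in total, so two real node inputs, printed by operator<< as
+  // "sparse:^^.".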
+ BitMaskType bit_mask_;
+};
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs);
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs);
+
+class TypedStateValueInfo final {
+ public:
+ TypedStateValueInfo(ZoneVector<MachineType> const* machine_types,
+ SparseInputMask sparse_input_mask)
+ : machine_types_(machine_types), sparse_input_mask_(sparse_input_mask) {}
+
+ ZoneVector<MachineType> const* machine_types() const {
+ return machine_types_;
+ }
+ SparseInputMask sparse_input_mask() const { return sparse_input_mask_; }
+
+ private:
+ ZoneVector<MachineType> const* machine_types_;
+ SparseInputMask sparse_input_mask_;
+};
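+// TypedStateValues operators carry this pair as their single Operator1
+// parameter (see CommonOperatorBuilder::TypedStateValues).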
+
+bool operator==(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+bool operator!=(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+
+std::ostream& operator<<(std::ostream&, TypedStateValueInfo const&);
+
+size_t hash_value(TypedStateValueInfo const& p);
+
// Used to mark a region (as identified by BeginRegion/FinishRegion) as either
// JavaScript-observable or not (i.e. allocations are not JavaScript observable
// themselves, but transitioning stores are).
@@ -181,6 +302,8 @@ size_t hash_value(OsrGuardType type);
std::ostream& operator<<(std::ostream&, OsrGuardType);
OsrGuardType OsrGuardTypeOf(Operator const*);
+SparseInputMask SparseInputMaskOf(Operator const*);
+
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
WARN_UNUSED_RESULT;
@@ -205,6 +328,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
const Operator* DeoptimizeIf(DeoptimizeReason reason);
const Operator* DeoptimizeUnless(DeoptimizeReason reason);
+ const Operator* TrapIf(int32_t trap_id);
+ const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
@@ -243,8 +368,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Checkpoint();
const Operator* BeginRegion(RegionObservability);
const Operator* FinishRegion();
- const Operator* StateValues(int arguments);
- const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+ const Operator* StateValues(int arguments, SparseInputMask bitmask);
+ const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
+ SparseInputMask bitmask);
const Operator* ObjectState(int pointer_slots);
const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
@@ -260,6 +386,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
// with {size} inputs.
const Operator* ResizeMergeOrPhi(const Operator* op, int size);
+  // SIMD operators.
+ const Operator* Int32x4ExtractLane(int32_t);
+ const Operator* Int32x4ReplaceLane(int32_t);
+ const Operator* Float32x4ExtractLane(int32_t);
+ const Operator* Float32x4ReplaceLane(int32_t);
+
// Constructs function info for frame state construction.
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index b159bb2da7..a0b3ebdd77 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -4,6 +4,8 @@
#include "src/compiler/control-builders.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -180,65 +182,6 @@ void BlockBuilder::EndBlock() {
set_environment(break_environment_);
}
-
-void TryCatchBuilder::BeginTry() {
- exit_environment_ = environment()->CopyAsUnreachable();
- catch_environment_ = environment()->CopyAsUnreachable();
- catch_environment_->Push(the_hole());
-}
-
-
-void TryCatchBuilder::Throw(Node* exception) {
- environment()->Push(exception);
- catch_environment_->Merge(environment());
- environment()->Pop();
- environment()->MarkAsUnreachable();
-}
-
-
-void TryCatchBuilder::EndTry() {
- exit_environment_->Merge(environment());
- exception_node_ = catch_environment_->Pop();
- set_environment(catch_environment_);
-}
-
-
-void TryCatchBuilder::EndCatch() {
- exit_environment_->Merge(environment());
- set_environment(exit_environment_);
-}
-
-
-void TryFinallyBuilder::BeginTry() {
- finally_environment_ = environment()->CopyAsUnreachable();
- finally_environment_->Push(the_hole());
- finally_environment_->Push(the_hole());
-}
-
-
-void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
- environment()->Push(value);
- environment()->Push(token);
- finally_environment_->Merge(environment());
- environment()->Drop(2);
-}
-
-
-void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
- environment()->Push(value);
- environment()->Push(fallthrough_token);
- finally_environment_->Merge(environment());
- environment()->Drop(2);
- token_node_ = finally_environment_->Pop();
- value_node_ = finally_environment_->Pop();
- set_environment(finally_environment_);
-}
-
-
-void TryFinallyBuilder::EndFinally() {
- // Nothing to be done here.
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index a59dcb699a..88efd276ad 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -145,59 +145,6 @@ class BlockBuilder final : public ControlBuilder {
Environment* break_environment_; // Environment after the block exits.
};
-
-// Tracks control flow for a try-catch statement.
-class TryCatchBuilder final : public ControlBuilder {
- public:
- explicit TryCatchBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder),
- catch_environment_(nullptr),
- exit_environment_(nullptr),
- exception_node_(nullptr) {}
-
- // Primitive control commands.
- void BeginTry();
- void Throw(Node* exception);
- void EndTry();
- void EndCatch();
-
- // Returns the exception value inside the 'catch' body.
- Node* GetExceptionNode() const { return exception_node_; }
-
- private:
- Environment* catch_environment_; // Environment for the 'catch' body.
- Environment* exit_environment_; // Environment after the statement.
- Node* exception_node_; // Node for exception in 'catch' body.
-};
-
-
-// Tracks control flow for a try-finally statement.
-class TryFinallyBuilder final : public ControlBuilder {
- public:
- explicit TryFinallyBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder),
- finally_environment_(nullptr),
- token_node_(nullptr),
- value_node_(nullptr) {}
-
- // Primitive control commands.
- void BeginTry();
- void LeaveTry(Node* token, Node* value);
- void EndTry(Node* token, Node* value);
- void EndFinally();
-
- // Returns the dispatch token value inside the 'finally' body.
- Node* GetDispatchTokenNode() const { return token_node_; }
-
- // Returns the saved result value inside the 'finally' body.
- Node* GetResultValueNode() const { return value_node_; }
-
- private:
- Environment* finally_environment_; // Environment for the 'finally' body.
- Node* token_node_; // Node for token in 'finally' body.
- Node* value_node_; // Node for value in 'finally' body.
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 81bf2997e6..d66a9c58d5 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -18,8 +18,9 @@ DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
: AdvancedReducer(editor),
graph_(graph),
common_(common),
- dead_(graph->NewNode(common->Dead())) {}
-
+ dead_(graph->NewNode(common->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
Reduction DeadCodeElimination::Reduce(Node* node) {
switch (node->opcode()) {
@@ -40,11 +41,11 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
Reduction DeadCodeElimination::ReduceEnd(Node* node) {
DCHECK_EQ(IrOpcode::kEnd, node->opcode());
- int const input_count = node->InputCount();
- DCHECK_LE(1, input_count);
+ Node::Inputs inputs = node->inputs();
+ DCHECK_LE(1, inputs.count());
int live_input_count = 0;
- for (int i = 0; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 0; i < inputs.count(); ++i) {
+ Node* const input = inputs[i];
// Skip dead inputs.
if (input->opcode() == IrOpcode::kDead) continue;
// Compact live inputs.
@@ -53,20 +54,20 @@ Reduction DeadCodeElimination::ReduceEnd(Node* node) {
}
if (live_input_count == 0) {
return Replace(dead());
- } else if (live_input_count < input_count) {
+ } else if (live_input_count < inputs.count()) {
node->TrimInputCount(live_input_count);
NodeProperties::ChangeOp(node, common()->End(live_input_count));
return Changed(node);
}
- DCHECK_EQ(input_count, live_input_count);
+ DCHECK_EQ(inputs.count(), live_input_count);
return NoChange();
}
Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
- int const input_count = node->InputCount();
- DCHECK_LE(1, input_count);
+ Node::Inputs inputs = node->inputs();
+ DCHECK_LE(1, inputs.count());
// Count the number of live inputs to {node} and compact them on the fly, also
// compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
// same time. We consider {Loop}s dead even if only the first control input
@@ -74,8 +75,8 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
int live_input_count = 0;
if (node->opcode() != IrOpcode::kLoop ||
node->InputAt(0)->opcode() != IrOpcode::kDead) {
- for (int i = 0; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 0; i < inputs.count(); ++i) {
+ Node* const input = inputs[i];
// Skip dead inputs.
if (input->opcode() == IrOpcode::kDead) continue;
// Compact live inputs.
@@ -83,7 +84,7 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
node->ReplaceInput(live_input_count, input);
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
- DCHECK_EQ(input_count + 1, use->InputCount());
+ DCHECK_EQ(inputs.count() + 1, use->InputCount());
use->ReplaceInput(live_input_count, use->InputAt(i));
}
}
@@ -109,9 +110,9 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
return Replace(node->InputAt(0));
}
DCHECK_LE(2, live_input_count);
- DCHECK_LE(live_input_count, input_count);
+ DCHECK_LE(live_input_count, inputs.count());
// Trim input count for the {Merge} or {Loop} node.
- if (live_input_count < input_count) {
+ if (live_input_count < inputs.count()) {
// Trim input counts for all phi uses and revisit them.
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d4b0576f79..b88906cfc1 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -24,7 +24,8 @@ EffectControlLinearizer::EffectControlLinearizer(
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- source_positions_(source_positions) {}
+ source_positions_(source_positions),
+ graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -596,829 +597,690 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
Node* frame_state,
Node** effect,
Node** control) {
- ValueEffectControl state(nullptr, nullptr, nullptr);
+ gasm()->Reset(*effect, *control);
+ Node* result = nullptr;
switch (node->opcode()) {
case IrOpcode::kChangeBitToTagged:
- state = LowerChangeBitToTagged(node, *effect, *control);
+ result = LowerChangeBitToTagged(node);
break;
case IrOpcode::kChangeInt31ToTaggedSigned:
- state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+ result = LowerChangeInt31ToTaggedSigned(node);
break;
case IrOpcode::kChangeInt32ToTagged:
- state = LowerChangeInt32ToTagged(node, *effect, *control);
+ result = LowerChangeInt32ToTagged(node);
break;
case IrOpcode::kChangeUint32ToTagged:
- state = LowerChangeUint32ToTagged(node, *effect, *control);
+ result = LowerChangeUint32ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTagged:
- state = LowerChangeFloat64ToTagged(node, *effect, *control);
+ result = LowerChangeFloat64ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTaggedPointer:
- state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+ result = LowerChangeFloat64ToTaggedPointer(node);
break;
case IrOpcode::kChangeTaggedSignedToInt32:
- state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+ result = LowerChangeTaggedSignedToInt32(node);
break;
case IrOpcode::kChangeTaggedToBit:
- state = LowerChangeTaggedToBit(node, *effect, *control);
+ result = LowerChangeTaggedToBit(node);
break;
case IrOpcode::kChangeTaggedToInt32:
- state = LowerChangeTaggedToInt32(node, *effect, *control);
+ result = LowerChangeTaggedToInt32(node);
break;
case IrOpcode::kChangeTaggedToUint32:
- state = LowerChangeTaggedToUint32(node, *effect, *control);
+ result = LowerChangeTaggedToUint32(node);
break;
case IrOpcode::kChangeTaggedToFloat64:
- state = LowerChangeTaggedToFloat64(node, *effect, *control);
+ result = LowerChangeTaggedToFloat64(node);
break;
case IrOpcode::kTruncateTaggedToBit:
- state = LowerTruncateTaggedToBit(node, *effect, *control);
+ result = LowerTruncateTaggedToBit(node);
break;
case IrOpcode::kTruncateTaggedToFloat64:
- state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+ result = LowerTruncateTaggedToFloat64(node);
break;
case IrOpcode::kCheckBounds:
- state = LowerCheckBounds(node, frame_state, *effect, *control);
+ result = LowerCheckBounds(node, frame_state);
break;
case IrOpcode::kCheckMaps:
- state = LowerCheckMaps(node, frame_state, *effect, *control);
+ result = LowerCheckMaps(node, frame_state);
break;
case IrOpcode::kCheckNumber:
- state = LowerCheckNumber(node, frame_state, *effect, *control);
+ result = LowerCheckNumber(node, frame_state);
break;
case IrOpcode::kCheckString:
- state = LowerCheckString(node, frame_state, *effect, *control);
+ result = LowerCheckString(node, frame_state);
+ break;
+ case IrOpcode::kCheckInternalizedString:
+ result = LowerCheckInternalizedString(node, frame_state);
break;
case IrOpcode::kCheckIf:
- state = LowerCheckIf(node, frame_state, *effect, *control);
+ result = LowerCheckIf(node, frame_state);
break;
case IrOpcode::kCheckedInt32Add:
- state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Add(node, frame_state);
break;
case IrOpcode::kCheckedInt32Sub:
- state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Sub(node, frame_state);
break;
case IrOpcode::kCheckedInt32Div:
- state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Div(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mod:
- state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Mod(node, frame_state);
break;
case IrOpcode::kCheckedUint32Div:
- state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32Div(node, frame_state);
break;
case IrOpcode::kCheckedUint32Mod:
- state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32Mod(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mul:
- state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Mul(node, frame_state);
break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
- state =
- LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToInt32:
- state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToTaggedSigned:
- state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
- *control);
+ result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedFloat64ToInt32:
- state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
- state =
- LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt32:
- state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToFloat64:
- state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedToFloat64(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToTaggedSigned:
- state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToTaggedPointer:
- state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
case IrOpcode::kTruncateTaggedToWord32:
- state = LowerTruncateTaggedToWord32(node, *effect, *control);
+ result = LowerTruncateTaggedToWord32(node);
break;
case IrOpcode::kCheckedTruncateTaggedToWord32:
- state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
case IrOpcode::kObjectIsCallable:
- state = LowerObjectIsCallable(node, *effect, *control);
+ result = LowerObjectIsCallable(node);
break;
case IrOpcode::kObjectIsNumber:
- state = LowerObjectIsNumber(node, *effect, *control);
+ result = LowerObjectIsNumber(node);
break;
case IrOpcode::kObjectIsReceiver:
- state = LowerObjectIsReceiver(node, *effect, *control);
+ result = LowerObjectIsReceiver(node);
break;
case IrOpcode::kObjectIsSmi:
- state = LowerObjectIsSmi(node, *effect, *control);
+ result = LowerObjectIsSmi(node);
break;
case IrOpcode::kObjectIsString:
- state = LowerObjectIsString(node, *effect, *control);
+ result = LowerObjectIsString(node);
break;
case IrOpcode::kObjectIsUndetectable:
- state = LowerObjectIsUndetectable(node, *effect, *control);
+ result = LowerObjectIsUndetectable(node);
+ break;
+ case IrOpcode::kNewRestParameterElements:
+ result = LowerNewRestParameterElements(node);
+ break;
+ case IrOpcode::kNewUnmappedArgumentsElements:
+ result = LowerNewUnmappedArgumentsElements(node);
break;
case IrOpcode::kArrayBufferWasNeutered:
- state = LowerArrayBufferWasNeutered(node, *effect, *control);
+ result = LowerArrayBufferWasNeutered(node);
break;
case IrOpcode::kStringFromCharCode:
- state = LowerStringFromCharCode(node, *effect, *control);
+ result = LowerStringFromCharCode(node);
break;
case IrOpcode::kStringFromCodePoint:
- state = LowerStringFromCodePoint(node, *effect, *control);
+ result = LowerStringFromCodePoint(node);
+ break;
+ case IrOpcode::kStringCharAt:
+ result = LowerStringCharAt(node);
break;
case IrOpcode::kStringCharCodeAt:
- state = LowerStringCharCodeAt(node, *effect, *control);
+ result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringEqual:
- state = LowerStringEqual(node, *effect, *control);
+ result = LowerStringEqual(node);
break;
case IrOpcode::kStringLessThan:
- state = LowerStringLessThan(node, *effect, *control);
+ result = LowerStringLessThan(node);
break;
case IrOpcode::kStringLessThanOrEqual:
- state = LowerStringLessThanOrEqual(node, *effect, *control);
+ result = LowerStringLessThanOrEqual(node);
break;
case IrOpcode::kCheckFloat64Hole:
- state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+ result = LowerCheckFloat64Hole(node, frame_state);
break;
case IrOpcode::kCheckTaggedHole:
- state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+ result = LowerCheckTaggedHole(node, frame_state);
break;
case IrOpcode::kConvertTaggedHoleToUndefined:
- state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+ result = LowerConvertTaggedHoleToUndefined(node);
break;
case IrOpcode::kPlainPrimitiveToNumber:
- state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+ result = LowerPlainPrimitiveToNumber(node);
break;
case IrOpcode::kPlainPrimitiveToWord32:
- state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+ result = LowerPlainPrimitiveToWord32(node);
break;
case IrOpcode::kPlainPrimitiveToFloat64:
- state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+ result = LowerPlainPrimitiveToFloat64(node);
break;
case IrOpcode::kEnsureWritableFastElements:
- state = LowerEnsureWritableFastElements(node, *effect, *control);
+ result = LowerEnsureWritableFastElements(node);
break;
case IrOpcode::kMaybeGrowFastElements:
- state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+ result = LowerMaybeGrowFastElements(node, frame_state);
break;
case IrOpcode::kTransitionElementsKind:
- state = LowerTransitionElementsKind(node, *effect, *control);
+ LowerTransitionElementsKind(node);
break;
case IrOpcode::kLoadTypedElement:
- state = LowerLoadTypedElement(node, *effect, *control);
+ result = LowerLoadTypedElement(node);
break;
case IrOpcode::kStoreTypedElement:
- state = LowerStoreTypedElement(node, *effect, *control);
+ LowerStoreTypedElement(node);
break;
case IrOpcode::kFloat64RoundUp:
- state = LowerFloat64RoundUp(node, *effect, *control);
+ if (!LowerFloat64RoundUp(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundDown:
- state = LowerFloat64RoundDown(node, *effect, *control);
+ if (!LowerFloat64RoundDown(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundTruncate:
- state = LowerFloat64RoundTruncate(node, *effect, *control);
+ if (!LowerFloat64RoundTruncate(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundTiesEven:
- state = LowerFloat64RoundTiesEven(node, *effect, *control);
+ if (!LowerFloat64RoundTiesEven(node).To(&result)) {
+ return false;
+ }
break;
default:
return false;
}
- NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
- *effect = state.effect;
- *control = state.control;
+ *effect = gasm()->ExtractCurrentEffect();
+ *control = gasm()->ExtractCurrentControl();
+ NodeProperties::ReplaceUses(node, result, *effect, *control);
return true;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control) {
+#define __ gasm()->
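+// Shorthand: the lowerings below write __ Foo(...) for gasm()->Foo(...) on
+// the shared GraphAssembler.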
+
+Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
Node* value = node->InputAt(0);
- return AllocateHeapNumberWithValue(value, effect, control);
+ return AllocateHeapNumberWithValue(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
Node* value = node->InputAt(0);
- return AllocateHeapNumberWithValue(value, effect, control);
+ return AllocateHeapNumberWithValue(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
Node* value = node->InputAt(0);
- Node* branch = graph()->NewNode(common()->Branch(), value, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->TrueConstant();
+ auto if_true = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->FalseConstant();
+ __ GotoIf(value, &if_true);
+ __ Goto(&done, __ FalseConstant());
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_true);
+ __ Goto(&done, __ TrueConstant());
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
- value = ChangeInt32ToSmi(value);
- return ValueEffectControl(value, effect, control);
+ return ChangeInt32ToSmi(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
if (machine()->Is64()) {
- return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+ return ChangeInt32ToSmi(value);
}
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
- control);
+ auto if_overflow = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* ovf = __ Projection(1, add);
+ __ GotoIf(ovf, &if_overflow);
+ __ Goto(&done, __ Projection(0, add));
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- ValueEffectControl alloc =
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+ __ Bind(&if_overflow);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
+ __ Goto(&done, number);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
-
- Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- alloc.value, vfalse, merge);
- Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
-
- return ValueEffectControl(phi, ephi, merge);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_in_smi_range = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = ChangeUint32ToSmi(value);
+ Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+ __ GotoUnless(check, &if_not_in_smi_range);
+ __ Goto(&done, ChangeUint32ToSmi(value));
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl alloc = AllocateHeapNumberWithValue(
- ChangeUint32ToFloat64(value), effect, if_false);
+ __ Bind(&if_not_in_smi_range);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, alloc.value, merge);
- Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+ __ Goto(&done, number);
+ __ Bind(&done);
- return ValueEffectControl(phi, ephi, merge);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
Node* value = node->InputAt(0);
- value = ChangeSmiToInt32(value);
- return ValueEffectControl(value, effect, control);
+ return ChangeSmiToInt32(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
- value = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant());
- return ValueEffectControl(value, effect, control);
+ return __ WordEqual(value, __ TrueConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
- Node* zero = jsgraph()->Int32Constant(0);
- Node* fzero = jsgraph()->Float64Constant(0.0);
- // Collect effect/control/value triples.
- int count = 0;
- Node* values[6];
- Node* effects[6];
- Node* controls[5];
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto if_not_oddball = __ MakeDeferredLabel<1>();
+ auto if_not_string = __ MakeDeferredLabel<1>();
+ auto if_not_heapnumber = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<5>(MachineRepresentation::kBit);
+
+ Node* zero = __ Int32Constant(0);
+ Node* fzero = __ Float64Constant(0.0);
// Check if {value} is a Smi.
Node* check_smi = ObjectIsSmi(value);
- Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_smi, control);
-
- // If {value} is a Smi, then we only need to check that it's not zero.
- Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
- Node* esmi = effect;
- {
- controls[count] = if_smi;
- effects[count] = esmi;
- values[count] =
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->IntPtrConstant(0)),
- zero);
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_smi);
+ __ GotoIf(check_smi, &if_smi);
// Load the map instance type of {value}.
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
- Node* value_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- effect, control);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
// Check if {value} is an Oddball.
Node* check_oddball =
- graph()->NewNode(machine()->Word32Equal(), value_instance_type,
- jsgraph()->Int32Constant(ODDBALL_TYPE));
- Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check_oddball, control);
+ __ Word32Equal(value_instance_type, __ Int32Constant(ODDBALL_TYPE));
+ __ GotoUnless(check_oddball, &if_not_oddball);
  // The only Oddball {value} that is truthy is true itself.
- Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
- Node* eoddball = effect;
- {
- controls[count] = if_oddball;
- effects[count] = eoddball;
- values[count] = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant());
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_oddball);
+ __ Goto(&done, __ WordEqual(value, __ TrueConstant()));
+ __ Bind(&if_not_oddball);
// Check if {value} is a String.
- Node* check_string =
- graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
- jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
- Node* branch_string =
- graph()->NewNode(common()->Branch(), check_string, control);
-
+ Node* check_string = __ Int32LessThan(value_instance_type,
+ __ Int32Constant(FIRST_NONSTRING_TYPE));
+ __ GotoUnless(check_string, &if_not_string);
// For String {value}, we need to check that the length is not zero.
- Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
- Node* estring = effect;
- {
- // Load the {value} length.
- Node* value_length = estring = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), value,
- estring, if_string);
-
- controls[count] = if_string;
- effects[count] = estring;
- values[count] =
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->WordEqual(), value_length,
- jsgraph()->IntPtrConstant(0)),
- zero);
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_string);
+ Node* value_length = __ LoadField(AccessBuilder::ForStringLength(), value);
+ __ Goto(&done, __ Word32Equal(
+ __ WordEqual(value_length, __ IntPtrConstant(0)), zero));
+ __ Bind(&if_not_string);
// Check if {value} is a HeapNumber.
Node* check_heapnumber =
- graph()->NewNode(machine()->Word32Equal(), value_instance_type,
- jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
- Node* branch_heapnumber =
- graph()->NewNode(common()->Branch(), check_heapnumber, control);
-
- // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
- Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
- Node* eheapnumber = effect;
- {
- // Load the raw value of {value}.
- Node* value_value = eheapnumber = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- eheapnumber, if_heapnumber);
-
- // Check if {value} is not one of 0, -0, or NaN.
- controls[count] = if_heapnumber;
- effects[count] = eheapnumber;
- values[count] = graph()->NewNode(
- machine()->Float64LessThan(), fzero,
- graph()->NewNode(machine()->Float64Abs(), value_value));
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
+ __ Word32Equal(value_instance_type, __ Int32Constant(HEAP_NUMBER_TYPE));
+ __ GotoUnless(check_heapnumber, &if_not_heapnumber);
+
+ // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+ // NaN.
+ // Load the raw value of {value}.
+ Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
  // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
  // those cases we can just check the undetectable bit on the map, which will
  // only be set for certain JSReceivers, e.g. document.all.
- {
- // Load the {value} map bit field.
- Node* value_map_bitfield = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- effect, control);
-
- controls[count] = control;
- effects[count] = effect;
- values[count] = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), value_map_bitfield,
- jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
- zero);
- count++;
- }
+ __ Bind(&if_not_heapnumber);
+
+ // Load the {value} map bit field.
+ Node* value_map_bitfield =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ __ Goto(&done, __ Word32Equal(
+ __ Word32And(value_map_bitfield,
+ __ Int32Constant(1 << Map::kIsUndetectable)),
+ zero));
- // Merge the different controls.
- control = graph()->NewNode(common()->Merge(count), count, controls);
- effects[count] = control;
- effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
- values[count] = control;
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
- count + 1, values);
+ __ Bind(&if_smi);
+ // If {value} is a Smi, then we only need to check that it's not zero.
+ __ Goto(&done,
+ __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
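+// Taken together, the cases above implement ToBoolean truthiness: a Smi is
+// truthy iff non-zero, the only truthy Oddball is true, a String is truthy
+// iff its length is non-zero, a HeapNumber is truthy unless it is +0, -0 or
+// NaN, and any other heap object is truthy unless its map is marked
+// undetectable.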
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToInt32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToUint32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
- return LowerTruncateTaggedToFloat64(node, effect, control);
+Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
+ return LowerTruncateTaggedToFloat64(node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- vtrue = ChangeSmiToInt32(value);
- vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
- }
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ Node* vtrue = ChangeSmiToInt32(value);
+ vtrue = __ ChangeInt32ToFloat64(vtrue);
+ __ Goto(&done, vtrue);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
- Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
- frame_state, effect, control);
-
- return ValueEffectControl(index, effect, control);
+ Node* check = __ Uint32LessThan(index, limit);
+ __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check, frame_state);
+ return index;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
+ CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
Node* value = node->InputAt(0);
- // Load the current map of the {value}.
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ ZoneHandleSet<Map> const& maps = p.maps();
+ size_t const map_count = maps.size();
- int const map_count = node->op()->ValueInputCount() - 1;
- Node** controls = temp_zone()->NewArray<Node*>(map_count);
- Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
+ if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
+ auto done =
+ __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count * 2);
+ auto migrate = __ MakeDeferredLabel<1>();
- for (int i = 0; i < map_count; ++i) {
- Node* map = node->InputAt(1 + i);
+ // Load the current map of the {value}.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
- if (i == map_count - 1) {
- controls[i] = effects[i] = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
- frame_state, effect, control);
- } else {
- control = graph()->NewNode(common()->Branch(), check, control);
- controls[i] = graph()->NewNode(common()->IfTrue(), control);
- control = graph()->NewNode(common()->IfFalse(), control);
- effects[i] = effect;
+ // Perform the map checks.
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ GotoUnless(check, &migrate);
+ __ Goto(&done);
+ } else {
+ __ GotoIf(check, &done);
+ }
}
- }
- control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
- effects[map_count] = control;
- effect =
- graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
+ // Perform the (deferred) instance migration.
+ __ Bind(&migrate);
+ {
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTryMigrateInstance;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ Node* result =
+ __ Call(desc, __ CEntryStubConstant(1), value,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
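+ // Runtime::kTryMigrateInstance signals failure by returning a Smi, so a
+ // Smi {result} means we have to deoptimize.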
+ Node* check = ObjectIsSmi(result);
+ __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
+ frame_state);
+ }
- return ValueEffectControl(value, effect, control);
-}
+ // Reload the current map of the {value}.
+ value_map = __ LoadField(AccessBuilder::ForMap(), value);
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* value = node->InputAt(0);
+ // Perform the map checks again.
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+ } else {
+ __ GotoIf(check, &done);
+ }
+ }
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ __ Goto(&done);
+ __ Bind(&done);
+ } else {
+ auto done =
+ __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
+ // Load the current map of the {value}.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- {
- Node* value_map = efalse0 =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse0, if_false0);
- Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- if_false0 = efalse0 = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
- frame_state, efalse0, if_false0);
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+ } else {
+ __ GotoIf(check, &done);
+ }
+ }
+ __ Goto(&done);
+ __ Bind(&done);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
- frame_state, effect, control);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>();
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
- Node* value_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- effect, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ __ Goto(&done);
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
- frame_state, effect, control);
+ __ Bind(&if_not_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+ __ Goto(&done);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
- value, frame_state, effect, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- return ValueEffectControl(value, effect, control);
+ Node* check1 = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
+ frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
+Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
- Node* value =
- graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check = graph()->NewNode(common()->Projection(1), value, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
+ Node* check1 = __ Word32Equal(
+ __ Word32And(value_instance_type,
+ __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag));
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
+ frame_state);
- value = graph()->NewNode(common()->Projection(0), value, control);
+ return value;
+}
- return ValueEffectControl(value, effect, control);
+Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ __ DeoptimizeUnless(DeoptimizeReason::kNoReason, value, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
- Node* value =
- graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
-
- Node* check = graph()->NewNode(common()->Projection(1), value, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
+ Node* value = __ Int32AddWithOverflow(lhs, rhs);
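+ // Projection 1 of the {value} pair is the overflow bit.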
+ Node* check = __ Projection(1, value);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, value);
+}
- value = graph()->NewNode(common()->Projection(0), value, control);
+Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
+ Node* frame_state) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
- return ValueEffectControl(value, effect, control);
+ Node* value = __ Int32SubWithOverflow(lhs, rhs);
+ Node* check = __ Projection(1, value);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
- Node* minusone = jsgraph()->Int32Constant(-1);
- Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
-
+Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_is_minint = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ auto minint_check_done = __ MakeLabel<2>();
+
+ Node* zero = __ Int32Constant(0);
+
// Check if {rhs} is positive (and not zero).
- Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* check0 = __ Int32LessThan(zero, rhs);
+ __ GotoUnless(check0, &if_not_positive);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- // Fast case, no additional checking required.
- vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
- }
+ // Fast case, no additional checking required.
+ __ Goto(&done, __ Int32Div(lhs, rhs));
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
{
+ __ Bind(&if_not_positive);
+
// Check if {rhs} is zero.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- if_false0 = efalse0 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, efalse0, if_false0);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Check if {lhs} is zero, as that would produce minus zero.
- check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
- if_false0 = efalse0 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check, frame_state, efalse0, if_false0);
+ check = __ Word32Equal(lhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
+ Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ __ GotoIf(check1, &if_is_minint);
+ __ Goto(&minint_check_done);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- {
- // Check if {rhs} is -1.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
- if_true1 = etrue1 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ __ Bind(&if_is_minint);
+ // Check if {rhs} is -1.
+ Node* minusone = __ Int32Constant(-1);
+ Node* is_minus_one = __ Word32Equal(rhs, minusone);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+ __ Goto(&minint_check_done);
+ __ Bind(&minint_check_done);
// Perform the actual integer division.
- vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+ __ Goto(&done, __ Int32Div(lhs, rhs));
}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
- vfalse0, control);
+ __ Bind(&done);
+ Node* value = done.PhiAt(0);
// Check if the remainder is non-zero.
- Node* check =
- graph()->NewNode(machine()->Word32Equal(), lhs,
- graph()->NewNode(machine()->Int32Mul(), rhs, value));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
- return ValueEffectControl(value, effect, control);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
- Node* one = jsgraph()->Int32Constant(1);
-
+Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
+ Node* frame_state) {
// General case for signed integer modulus, with optimization for (unknown)
// power of 2 right hand side.
//
@@ -1439,1226 +1301,673 @@ EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ auto if_rhs_not_positive = __ MakeDeferredLabel<1>();
+ auto if_lhs_negative = __ MakeDeferredLabel<1>();
+ auto if_power_of_two = __ MakeLabel<1>();
+ auto rhs_checked = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
+
+ Node* zero = __ Int32Constant(0);
+
// Check if {rhs} is not strictly positive.
- Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
+ __ GotoIf(check0, &if_rhs_not_positive);
+ __ Goto(&rhs_checked, rhs);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
+ __ Bind(&if_rhs_not_positive);
{
// Negate {rhs}; this might still produce a negative result in the case
// of -2^31, but that is handled safely below.
- vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+ Node* vtrue0 = __ Int32Sub(zero, rhs);
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
- if_true0 = etrue0 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, etrue0, if_true0);
+ Node* check = __ Word32Equal(vtrue0, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ Goto(&rhs_checked, vtrue0);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0 = rhs;
-
- // At this point {rhs} is either greater than zero or -2^31, both are
- // fine for the code that follows.
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue0, vfalse0, control);
+ __ Bind(&rhs_checked);
+ rhs = rhs_checked.PhiAt(0);
// Check if {lhs} is negative.
- Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
- {
- // Compute the remainder using {lhs % msk}.
- vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
-
- // Check if we would have to return -0.
- Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
- if_true1 = etrue1 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check, frame_state, etrue1, if_true1);
- }
+ Node* check1 = __ Int32LessThan(lhs, zero);
+ __ GotoIf(check1, &if_lhs_negative);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = effect;
- Node* vfalse1;
+ // The {lhs} is non-negative here.
{
- Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+ Node* one = __ Int32Constant(1);
+ Node* msk = __ Int32Sub(rhs, one);
// Check if {rhs} minus one is a valid mask.
- Node* check2 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+ Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
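+ // ({rhs} & ({rhs} - 1)) == 0 holds exactly when {rhs} is a power of two.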
+ __ GotoIf(check2, &if_power_of_two);
+ // Compute the remainder using the generic {lhs % rhs}.
+ __ Goto(&done, __ Int32Mod(lhs, rhs));
+ __ Bind(&if_power_of_two);
// Compute the remainder using {lhs & msk}.
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+ __ Goto(&done, __ Word32And(lhs, msk));
+ }
- // Compute the remainder using the generic {lhs % rhs}.
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 =
- graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+ __ Bind(&if_lhs_negative);
+ {
+ // Compute the remainder using {lhs % rhs}.
+ Node* vtrue1 = __ Int32Mod(lhs, rhs);
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue2, vfalse2, if_false1);
+ // Check if we would have to return -0.
+ Node* check = __ Word32Equal(vtrue1, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ Goto(&done, vtrue1);
}
- control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
- vfalse1, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ Node* zero = __ Int32Constant(0);
+
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- control = effect = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Perform the actual unsigned integer division.
- Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+ Node* value = __ Uint32Div(lhs, rhs);
// Check if the remainder is non-zero.
- check = graph()->NewNode(machine()->Word32Equal(), lhs,
- graph()->NewNode(machine()->Int32Mul(), rhs, value));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ Node* zero = __ Int32Constant(0);
+
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- control = effect = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Perform the actual unsigned integer modulus.
- Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
-
- return ValueEffectControl(value, effect, control);
+ return __ Uint32Mod(lhs, rhs);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
- Node* zero = jsgraph()->Int32Constant(0);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
- Node* projection =
- graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+ Node* projection = __ Int32MulWithOverflow(lhs, rhs);
+ Node* check = __ Projection(1, projection);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
- Node* check = graph()->NewNode(common()->Projection(1), projection, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
-
- Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+ Node* value = __ Projection(0, projection);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, control);
-
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* e_if_zero = effect;
- {
- // We may need to return negative zero.
- Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
- Node* check_or =
- graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
- if_zero = e_if_zero =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check_or, frame_state, e_if_zero, if_zero);
- }
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto check_done = __ MakeLabel<2>();
+ Node* zero = __ Int32Constant(0);
+ Node* check_zero = __ Word32Equal(value, zero);
+ __ GotoIf(check_zero, &if_zero);
+ __ Goto(&check_done);
- Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
- Node* e_if_not_zero = effect;
+ __ Bind(&if_zero);
+ // We may need to return negative zero.
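+ // Since the product is zero, one input is zero; the result must be -0
+ // exactly when the other input is negative, i.e. when (lhs | rhs) < 0.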
+ Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+ __ Goto(&check_done);
- control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
- effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
- control);
+ __ Bind(&check_done);
}
- return ValueEffectControl(value, effect, control);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
+ Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
- control);
-
- Node* check = graph()->NewNode(common()->Projection(1), add, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
-
- value = graph()->NewNode(common()->Projection(0), add, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, add);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
- Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
- Node* is_safe =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
+ Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
- value = ChangeUint32ToSmi(value);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
- Node* value,
- Node* frame_state,
- Node* effect,
- Node* control) {
- Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
- Node* check_same = graph()->NewNode(
- machine()->Float64Equal(), value,
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
- check_same, frame_state, effect, control);
+ Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+ return ChangeUint32ToSmi(value);
+}
+
+Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
+ CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+ Node* value32 = __ RoundFloat64ToInt32(value);
+ Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
+ frame_state);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
- jsgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, control);
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto check_done = __ MakeLabel<2>();
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+ Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
+ __ GotoIf(check_zero, &if_zero);
+ __ Goto(&check_done);
+ __ Bind(&if_zero);
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine()->Int32LessThan(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(0));
-
- Node* deopt_minus_zero =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check_negative, frame_state, effect, if_zero);
-
- control =
- graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
- effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
- control);
- }
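+ // The high word of -0.0 is 0x80000000, which is negative as an int32,
+ // while the high word of +0.0 is zero.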
+ Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(0));
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+ __ Goto(&check_done);
- return ValueEffectControl(value32, effect, control);
+ __ Bind(&check_done);
+ }
+ return value32;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
-
- return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
+ return BuildCheckedFloat64ToInt32(mode, value, frame_state);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
-
Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
- check, frame_state, effect, control);
- value = ChangeSmiToInt32(value);
-
- return ValueEffectControl(value, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
+ return ChangeSmiToInt32(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
// In the Smi case, just convert to int32.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ __ Goto(&done, ChangeSmiToInt32(value));
// In the non-Smi case, check the heap numberness, load the number and convert
// to int32.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- if_false = efalse = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
- frame_state, efalse, if_false);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- ValueEffectControl state =
- BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
- if_false = state.control;
- efalse = state.effect;
- vfalse = state.value;
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
- Node* control) {
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-
- Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
-
+ __ Bind(&if_not_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_map,
+ frame_state);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+ __ Goto(&done, vfalse);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
switch (mode) {
case CheckTaggedInputMode::kNumber: {
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
- check_number, frame_state, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_number,
+ frame_state);
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
- Node* branch =
- graph()->NewNode(common()->Branch(), check_number, control);
+ auto check_done = __ MakeLabel<2>();
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ __ GotoIf(check_number, &check_done);
// Oddballs also contain the numeric value, so let us just check that
// we have an oddball here.
- Node* efalse = effect;
- Node* instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- value_map, efalse, if_false);
+ Node* instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* check_oddball =
- graph()->NewNode(machine()->Word32Equal(), instance_type,
- jsgraph()->Int32Constant(ODDBALL_TYPE));
- if_false = efalse = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
- check_oddball, frame_state, efalse, if_false);
+ __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
+ __ DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
+ frame_state);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ __ Goto(&check_done);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ __ Bind(&check_done);
break;
}
}
-
- value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- effect, control);
- return ValueEffectControl(value, effect, control);
+ return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+ Node* frame_state) {
CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
Node* value = node->InputAt(0);
+ auto if_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
Node* check = ObjectIsSmi(value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ __ GotoIf(check, &if_smi);
// In the Smi case, just convert to int32 and then float64.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
- vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-
// Otherwise, check heap numberness and load the number.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
- mode, value, frame_state, effect, if_false);
-
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_true, number_state.control);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
- number_state.effect, merge);
- Node* result =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
- number_state.value, merge);
-
- return ValueEffectControl(result, effect_phi, merge);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
- Node* value = node->InputAt(0);
+ Node* number =
+ BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ __ Goto(&done, number);
- Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
- check, frame_state, effect, control);
+ __ Bind(&if_smi);
+ Node* from_smi = ChangeSmiToInt32(value);
+ from_smi = __ ChangeInt32ToFloat64(from_smi);
+ __ Goto(&done, from_smi);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
- frame_state, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
- return ValueEffectControl(value, effect, control);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+ return value;
+}
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
+ Node* value = node->InputAt(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
- }
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ TruncateFloat64ToWord32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
// In the Smi case, just convert to int32.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ __ Goto(&done, ChangeSmiToInt32(value));
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
- if_false);
- false_state.value =
- graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
-
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_true, false_state.control);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
- false_state.effect, merge);
- Node* result =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
- false_state.value, merge);
-
- return ValueEffectControl(result, effect_phi, merge);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
- Node* control) {
+ __ Bind(&if_not_smi);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+ number = __ TruncateFloat64ToWord32(number);
+ __ Goto(&done, number);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_bit_field = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- jsgraph()->Int32Constant(1 << Map::kIsCallable),
- graph()->NewNode(
- machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable))));
- }
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
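+ // Callable iff the IsCallable bit is set and the IsUndetectable bit is
+ // clear, so undetectable callables (e.g. document.all) answer false.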
+ Node* vfalse = __ Word32Equal(
+ __ Int32Constant(1 << Map::kIsCallable),
+ __ Word32And(value_bit_field,
+ __ Int32Constant((1 << Map::kIsCallable) |
+ (1 << Map::kIsUndetectable))));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(1);
+ auto if_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- }
+ __ GotoIf(ObjectIsSmi(value), &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(1));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ __ GotoIf(ObjectIsSmi(value), &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
- value_instance_type);
- }
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* result = __ Uint32LessThanOrEqual(
+ __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ __ Goto(&done, result);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
Node* value = node->InputAt(0);
- value = ObjectIsSmi(value);
- return ValueEffectControl(value, effect, control);
+ return ObjectIsSmi(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* vfalse = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_bit_field = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32Equal(), jsgraph()->Int32Constant(0),
- graph()->NewNode(
- machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
- jsgraph()->Int32Constant(0));
- }
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
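+ // Test the IsUndetectable bit; the outer Word32Equal against zero
+ // normalizes the result to a 0/1 bit value.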
+ Node* vfalse = __ Word32Equal(
+ __ Word32Equal(__ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(1 << Map::kIsUndetectable))),
+ __ Int32Constant(0));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
- Node* control) {
- Node* value = node->InputAt(0);
-
- Node* value_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
- effect, control);
- value = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant(
- JSArrayBuffer::WasNeutered::kMask)),
- jsgraph()->Int32Constant(0)),
- jsgraph()->Int32Constant(0));
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
- Node* control) {
- Node* subject = node->InputAt(0);
- Node* index = node->InputAt(1);
-
- // We may need to loop several times for ConsString/SlicedString {subject}s.
- Node* loop =
- graph()->NewNode(common()->Loop(4), control, control, control, control);
- Node* lsubject =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
- subject, subject, subject, subject, loop);
- Node* lindex =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
- index, index, index, loop);
- Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
- effect, effect, loop);
-
- control = loop;
- effect = leffect;
-
- // Determine the instance type of {lsubject}.
- Node* lsubject_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- lsubject, effect, control);
- Node* lsubject_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- lsubject_map, effect, control);
-
- // Check if {lsubject} is a SeqString.
- Node* check0 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kSeqStringTag));
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
- Node* check1 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringEncodingMask)),
- jsgraph()->Int32Constant(kTwoByteStringTag));
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1 = etrue1 =
- graph()->NewNode(simplified()->LoadElement(
- AccessBuilder::ForSeqTwoByteStringCharacter()),
- lsubject, lindex, etrue1, if_true1);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = efalse1 =
- graph()->NewNode(simplified()->LoadElement(
- AccessBuilder::ForSeqOneByteStringCharacter()),
- lsubject, lindex, efalse1, if_false1);
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if the {lsubject} is a ConsString.
- Node* check1 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kConsStringTag));
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- {
- // Load the right hand side of the {lsubject} ConsString.
- Node* lsubject_second = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
- lsubject, etrue1, if_true1);
-
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we flatten the string first.
- Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
- jsgraph()->EmptyStringConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = etrue1;
- Node* vtrue2 = etrue2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
- lsubject, etrue2, if_true2);
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = etrue1;
- Node* vfalse2;
- {
- // Flatten the {lsubject} ConsString first.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kFlattenString;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- vfalse2 = efalse2 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
- efalse2, if_false2);
- }
+Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
+ int const formal_parameter_count = ParameterCountOf(node->op());
- // Retry the {loop} with the new subject.
- loop->ReplaceInput(1, if_true2);
- lindex->ReplaceInput(1, lindex);
- leffect->ReplaceInput(1, etrue2);
- lsubject->ReplaceInput(1, vtrue2);
- loop->ReplaceInput(2, if_false2);
- lindex->ReplaceInput(2, lindex);
- leffect->ReplaceInput(2, efalse2);
- lsubject->ReplaceInput(2, vfalse2);
- }
+ Callable const callable = CodeFactory::NewRestParameterElements(isolate());
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()),
+ __ IntPtrConstant(formal_parameter_count),
+ __ NoContextConstant());
+}
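// A minimal sketch of the __ shorthand the rewritten lowerings rely on. By
// the usual V8 convention it is a macro bound to the linearizer's graph
// assembler; the accessor and method names below follow that convention and
// are assumptions, not quotes from this patch.

#define __ gasm()->

Node* EffectControlLinearizer::LowerExampleSketch(Node* node) {
  // Each __ Foo(...) call emits a node and implicitly threads the current
  // effect and control edges, replacing the explicit
  // graph()->NewNode(op, ..., effect, control) plumbing of the removed code.
  return __ Int32Constant(0);
}

#undef __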
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Check if the {lsubject} is an ExternalString.
- Node* check2 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kExternalStringTag));
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = efalse1;
- Node* vtrue2;
- {
- // Check if the {lsubject} is a short external string.
- Node* check3 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kShortExternalStringMask)),
- jsgraph()->Int32Constant(0));
- Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check3, if_true2);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = etrue2;
- Node* vtrue3;
- {
- // Load the actual resource data from the {lsubject}.
- Node* lsubject_resource_data = etrue3 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForExternalStringResourceData()),
- lsubject, etrue3, if_true3);
-
- // Check if the {lsubject} is a TwoByteExternalString or a
- // OneByteExternalString.
- Node* check4 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringEncodingMask)),
- jsgraph()->Int32Constant(kTwoByteStringTag));
- Node* branch4 =
- graph()->NewNode(common()->Branch(), check4, if_true3);
-
- Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
- Node* etrue4 = etrue3;
- Node* vtrue4 = etrue4 = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForExternalTwoByteStringCharacter()),
- lsubject_resource_data, lindex, etrue4, if_true4);
-
- Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
- Node* efalse4 = etrue3;
- Node* vfalse4 = efalse4 = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForExternalOneByteStringCharacter()),
- lsubject_resource_data, lindex, efalse4, if_false4);
-
- if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
- etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
- if_true3);
- vtrue3 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue4, vfalse4, if_true3);
- }
+Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
+ int const formal_parameter_count = ParameterCountOf(node->op());
- Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
- Node* efalse3 = etrue2;
- Node* vfalse3;
- {
- // The {lsubject} might be compressed, call the runtime.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kExternalStringGetChar;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- vfalse3 = efalse3 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
- ChangeInt32ToSmi(lindex),
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
- efalse3, if_false3);
- vfalse3 = ChangeSmiToInt32(vfalse3);
- }
+ Callable const callable =
+ CodeFactory::NewUnmappedArgumentsElements(isolate());
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()),
+ __ IntPtrConstant(formal_parameter_count),
+ __ NoContextConstant());
+}
- if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
- etrue2 =
- graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
- vtrue2 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue3, vfalse3, if_true2);
- }
+Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
+ Node* value = node->InputAt(0);
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = efalse1;
- {
- // The {lsubject} is a SlicedString, continue with its parent.
- Node* lsubject_parent = efalse2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
- lsubject, efalse2, if_false2);
- Node* lsubject_offset = efalse2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
- lsubject, efalse2, if_false2);
- Node* lsubject_index = graph()->NewNode(
- machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
-
- // Retry the {loop} with the parent subject.
- loop->ReplaceInput(3, if_false2);
- leffect->ReplaceInput(3, efalse2);
- lindex->ReplaceInput(3, lsubject_index);
- lsubject->ReplaceInput(3, lsubject_parent);
- }
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
+ return __ Word32Equal(
+ __ Word32Equal(
+ __ Word32And(value_bit_field,
+ __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+ __ Int32Constant(0)),
+ __ Int32Constant(0));
+}
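// The nested Word32Equal-against-zero pair above is the usual mask-test-to-
// boolean idiom: the inner compare yields "bit clear?", and the outer
// compare negates that to "bit set?". A standalone check of the same
// arithmetic; the mask value here is illustrative, the real one is
// JSArrayBuffer::WasNeutered::kMask.

#include <cassert>
#include <cstdint>

constexpr uint32_t kIllustrativeNeuteredMask = 1u << 3;  // assumed bit

// Mirrors Word32Equal(Word32Equal(bit_field & mask, 0), 0).
uint32_t WasNeuteredBit(uint32_t bit_field) {
  uint32_t bit_clear = (bit_field & kIllustrativeNeuteredMask) == 0 ? 1u : 0u;
  return bit_clear == 0 ? 1u : 0u;  // 1 iff the neutered bit is set.
}

int main() {
  assert(WasNeuteredBit(0) == 0);
  assert(WasNeuteredBit(kIllustrativeNeuteredMask) == 1);
  return 0;
}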
- if_false1 = if_true2;
- efalse1 = etrue2;
- vfalse1 = vtrue2;
- }
+Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
- if_false0 = if_false1;
- efalse0 = efalse1;
- vfalse0 = vfalse1;
- }
+ Callable const callable = CodeFactory::StringCharAt(isolate());
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
+}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
- vfalse0, control);
+Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
- return ValueEffectControl(value, effect, control);
+ Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+ MachineType::TaggedSigned());
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
+ auto runtime_call = __ MakeDeferredLabel<2>();
+ auto if_undefined = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
// Compute the character code.
- Node* code =
- graph()->NewNode(machine()->Word32And(), value,
- jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+ Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
// Check if the {code} is a one-byte char code.
- Node* check0 =
- graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
- jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
+ Node* check0 = __ Int32LessThanOrEqual(
+ code, __ Int32Constant(String::kMaxOneByteCharCode));
+ __ GotoUnless(check0, &runtime_call);
   // Load the isolate-wide single character string cache.
- Node* cache =
- jsgraph()->HeapConstant(factory()->single_character_string_cache());
+ Node* cache = __ HeapConstant(factory()->single_character_string_cache());
// Compute the {cache} index for {code}.
- Node* index = machine()->Is32()
- ? code
- : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+ Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
// Check if we have an entry for the {code} in the single character string
// cache already.
- Node* entry = etrue0 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
- index, etrue0, if_true0);
+ Node* entry =
+ __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
- Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
- jsgraph()->UndefinedConstant());
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
-
- // Use the {entry} from the {cache}.
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = entry;
+ Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
+ __ GotoIf(check1, &runtime_call);
+ __ Goto(&done, entry);
// Let %StringFromCharCode handle this case.
// TODO(turbofan): At some point we may consider adding a stub for this
// deferred case, so that we don't need to call to C++ here.
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1;
+ __ Bind(&runtime_call);
{
- if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
- etrue1 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringCharFromCode;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- vtrue1 = etrue1 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1),
- ChangeInt32ToSmi(code),
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
- if_true1);
+ Node* vtrue1 =
+ __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+ __ Goto(&done, vtrue1);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
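// The MakeDeferredLabel/GotoIf/Goto/Bind/PhiAt calls above replace the
// hand-built Branch/IfTrue/IfFalse/Merge/Phi diamonds of the removed code:
// a label carries the representation of its merged value, the template
// argument is the number of incoming Gotos, and deferred labels mark a path
// as cold. A condensed sketch of the pattern, restricted to call shapes
// that appear in this patch (the method name is hypothetical and the
// __ binding is assumed context):

Node* EffectControlLinearizer::SelectNonZeroSketch(Node* a, Node* b) {
  auto if_zero = __ MakeDeferredLabel<1>();                     // cold path
  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);  // merge point

  __ GotoIf(__ Word32Equal(a, __ Int32Constant(0)), &if_zero);
  __ Goto(&done, a);  // Fast path: first phi input.

  __ Bind(&if_zero);
  __ Goto(&done, b);  // Deferred path: second phi input.

  __ Bind(&done);
  return done.PhiAt(0);  // The merged Phi value.
}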
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
Node* value = node->InputAt(0);
Node* code = value;
- Node* etrue0 = effect;
- Node* vtrue0;
+ auto if_not_single_code = __ MakeDeferredLabel<1>();
+ auto if_not_one_byte = __ MakeDeferredLabel<1>();
+ auto cache_miss = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<4>(MachineRepresentation::kTagged);
// Check if the {code} is a single code unit
- Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
- jsgraph()->Uint32Constant(0xFFFF));
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
+ __ GotoUnless(check0, &if_not_single_code);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
{
     // Check if the {code} is a one-byte character
- Node* check1 = graph()->NewNode(
- machine()->Uint32LessThanOrEqual(), code,
- jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1;
+ Node* check1 = __ Uint32LessThanOrEqual(
+ code, __ Uint32Constant(String::kMaxOneByteCharCode));
+ __ GotoUnless(check1, &if_not_one_byte);
{
       // Load the isolate-wide single character string cache.
- Node* cache =
- jsgraph()->HeapConstant(factory()->single_character_string_cache());
+      Node* cache =
+          __ HeapConstant(factory()->single_character_string_cache());
// Compute the {cache} index for {code}.
- Node* index =
- machine()->Is32()
- ? code
- : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+ Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
// Check if we have an entry for the {code} in the single character string
// cache already.
- Node* entry = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
- cache, index, etrue1, if_true1);
-
- Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
- jsgraph()->UndefinedConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = etrue1;
- Node* vtrue2;
+ Node* entry =
+ __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
+
+ Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+ __ GotoIf(check2, &cache_miss);
+
+ // Use the {entry} from the {cache}.
+ __ Goto(&done, entry);
+
+ __ Bind(&cache_miss);
{
// Allocate a new SeqOneByteString for {code}.
- vtrue2 = etrue2 = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
- if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
- jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
- if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
- jsgraph()->SmiConstant(1), etrue2, if_true2);
- etrue2 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
- kNoWriteBarrier)),
- vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag),
- code, etrue2, if_true2);
+ Node* vtrue2 = __ Allocate(
+ NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vtrue2,
+ __ HeapConstant(factory()->one_byte_string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
+ __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ vtrue2,
+ __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ code);
// Remember it in the {cache}.
- etrue2 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
- cache, index, vtrue2, etrue2, if_true2);
+ __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
+ vtrue2);
+ __ Goto(&done, vtrue2);
}
-
- // Use the {entry} from the {cache}.
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = etrue0;
- Node* vfalse2 = entry;
-
- if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- etrue1 =
- graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
- vtrue1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue2, vfalse2, if_true1);
}
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = effect;
- Node* vfalse1;
+ __ Bind(&if_not_one_byte);
{
// Allocate a new SeqTwoByteString for {code}.
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
- if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
- jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
- jsgraph()->SmiConstant(1), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
- kNoWriteBarrier)),
- vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- code, efalse1, if_false1);
+ Node* vfalse1 = __ Allocate(
+ NOT_TENURED, __ Int32Constant(SeqTwoByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vfalse1,
+ __ HeapConstant(factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
+ __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
+ vfalse1,
+ __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ code);
+ __ Goto(&done, vfalse1);
}
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_true0);
}
+ __ Bind(&if_not_single_code);
// Generate surrogate pair string
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
{
switch (UnicodeEncodingOf(node->op())) {
case UnicodeEncoding::UTF16:
@@ -2666,553 +1975,359 @@ EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
case UnicodeEncoding::UTF32: {
      // Convert UTF32 to UTF16 code units, and store as a 32-bit word.
- Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+ Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
// lead = (codepoint >> 10) + LEAD_OFFSET
Node* lead =
- graph()->NewNode(machine()->Int32Add(),
- graph()->NewNode(machine()->Word32Shr(), code,
- jsgraph()->Int32Constant(10)),
- lead_offset);
+ __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
// trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail =
- graph()->NewNode(machine()->Int32Add(),
- graph()->NewNode(machine()->Word32And(), code,
- jsgraph()->Int32Constant(0x3FF)),
- jsgraph()->Int32Constant(0xDC00));
+ Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+ __ Int32Constant(0xDC00));
       // codepoint = (trail << 16) | lead;
- code = graph()->NewNode(machine()->Word32Or(),
- graph()->NewNode(machine()->Word32Shl(), trail,
- jsgraph()->Int32Constant(16)),
- lead);
+ code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
break;
}
}
// Allocate a new SeqTwoByteString for {code}.
- vfalse0 = efalse0 =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
- efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
- jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
- jsgraph()->SmiConstant(2), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
- kNoWriteBarrier)),
- vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- code, efalse0, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
-}
+ Node* vfalse0 = __ Allocate(NOT_TENURED,
+ __ Int32Constant(SeqTwoByteString::SizeFor(2)));
+ __ StoreField(AccessBuilder::ForMap(), vfalse0,
+ __ HeapConstant(factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ vfalse0,
+ __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ code);
+ __ Goto(&done, vfalse0);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
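// A standalone check of the UTF32 branch's surrogate-pair arithmetic for
// the example code point U+1F600: the expected lead/trail values follow
// from the Unicode surrogate formulas, and the packed word stores the
// trail unit in the high half exactly as the lowering above does.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t code = 0x1F600;  // Example supplementary code point.
  uint32_t lead_offset = 0xD800 - (0x10000 >> 10);
  uint32_t lead = (code >> 10) + lead_offset;  // 0xD83D
  uint32_t trail = (code & 0x3FF) + 0xDC00;    // 0xDE00
  uint32_t packed = (trail << 16) | lead;      // Trail in the high half.
  assert(lead == 0xD83D && trail == 0xDE00);
  assert(packed == 0xDE00D83D);
  return 0;
}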
+
+Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+ Node* node) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringComparison(Callable const& callable,
- Node* node, Node* effect,
- Node* control) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
- node->AppendInput(graph()->zone(), effect);
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return ValueEffectControl(node, node, control);
+ return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ __ NoContextConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
- Node* control) {
- return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
- effect, control);
+Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
+ return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
- Node* control) {
- return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
- effect, control);
+Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
+ return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
- node, effect, control);
+ node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
+ Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
// with allow-return-hole, we cannot do anything, so just deoptimize
// in case of the hole NaN (similar to Crankshaft).
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(kHoleNanUpper32));
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TheHoleConstant());
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* check = __ WordEqual(value, __ TheHoleConstant());
+ __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->UndefinedConstant();
+ auto if_is_hole = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = value;
+ Node* check = __ WordEqual(value, __ TheHoleConstant());
+ __ GotoIf(check, &if_is_hole);
+ __ Goto(&done, value);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_is_hole);
+ __ Goto(&done, __ UndefinedConstant());
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
- Node* control) {
- Node* result = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- result, jsgraph()->HeapNumberMapConstant(), effect,
- control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
- value, effect, control);
- return ValueEffectControl(result, effect, control);
+Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
+ Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
+ __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
+ __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
+ return result;
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ value = __ ChangeInt32ToInt64(value);
}
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+ return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+ value = __ ChangeUint32ToUint64(value);
}
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+ return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+ value = __ WordSar(value, SmiShiftBitsConstant());
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+ value = __ TruncateInt64ToInt32(value);
}
return value;
}
+
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
- return graph()->NewNode(
- machine()->WordEqual(),
- graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask)),
- jsgraph()->IntPtrConstant(kSmiTag));
+ return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
+ __ IntPtrConstant(kSmiTag));
}
Node* EffectControlLinearizer::SmiMaxValueConstant() {
- return jsgraph()->Int32Constant(Smi::kMaxValue);
+ return __ Int32Constant(Smi::kMaxValue);
}
Node* EffectControlLinearizer::SmiShiftBitsConstant() {
- return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
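// The Smi helpers above encode the standard V8 pointer-tagging scheme: a
// Smi keeps its tag bit(s) clear and carries the payload above the shift.
// A standalone illustration with the conventional 64-bit constants, which
// are assumptions here; the real values come from the V8 headers.

#include <cassert>
#include <cstdint>

constexpr intptr_t kSmiTag = 0;
constexpr intptr_t kSmiTagMask = 1;
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;  // 64-bit layout assumed.

bool ObjectIsSmiSketch(intptr_t value) {
  // WordEqual(WordAnd(value, kSmiTagMask), kSmiTag): low bit clear => Smi.
  return (value & kSmiTagMask) == kSmiTag;
}

intptr_t ChangeInt32ToSmiSketch(int32_t v) {
  // WordShl(value, SmiShiftBitsConstant()): payload in the upper 32 bits.
  return static_cast<intptr_t>(v) << (kSmiShiftSize + kSmiTagSize);
}

int main() {
  intptr_t smi = ChangeInt32ToSmiSketch(42);
  assert(ObjectIsSmiSketch(smi));
  assert((smi >> (kSmiShiftSize + kSmiTagSize)) == 42);
  return 0;
}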
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
Node* value = node->InputAt(0);
- Node* result = effect =
- graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
- value, jsgraph()->NoContextConstant(), effect);
- return ValueEffectControl(result, effect, control);
+ return __ ToNumber(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto if_to_number_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- vfalse0 = efalse0 = graph()->NewNode(
- ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
- jsgraph()->NoContextConstant(), efalse0);
-
- Node* check1 = ObjectIsSmi(vfalse0);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1 = ChangeSmiToInt32(vfalse0);
+ __ Bind(&if_not_smi);
+ Node* to_number = __ ToNumber(value);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
- efalse1, if_false1);
- vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
- }
+ Node* check1 = ObjectIsSmi(to_number);
+ __ GotoIf(check1, &if_to_number_smi);
+ Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+ __ Goto(&done, __ TruncateFloat64ToWord32(number));
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_false0);
- }
+ __ Bind(&if_to_number_smi);
+ __ Goto(&done, ChangeSmiToInt32(to_number));
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue0, vfalse0, control);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- vtrue0 = ChangeSmiToInt32(value);
- vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- vfalse0 = efalse0 = graph()->NewNode(
- ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
- jsgraph()->NoContextConstant(), efalse0);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto if_to_number_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>(MachineRepresentation::kFloat64);
- Node* check1 = ObjectIsSmi(vfalse0);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ Node* from_smi = ChangeSmiToInt32(value);
+ __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- vtrue1 = ChangeSmiToInt32(vfalse0);
- vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
- }
+ __ Bind(&if_not_smi);
+ Node* to_number = __ ToNumber(value);
+ Node* check1 = ObjectIsSmi(to_number);
+ __ GotoIf(check1, &if_to_number_smi);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
- efalse1, if_false1);
- }
+ Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+ __ Goto(&done, number);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
+ __ Bind(&if_to_number_smi);
+ Node* number_from_smi = ChangeSmiToInt32(to_number);
+ number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
+ __ Goto(&done, number_from_smi);
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
+ auto if_not_fixed_array = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
// Load the current map of {elements}.
- Node* elements_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- elements, effect, control);
+ Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
// Check if {elements} is not a copy-on-write FixedArray.
- Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
- jsgraph()->FixedArrayMapConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+ Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
+ __ GotoUnless(check, &if_not_fixed_array);
// Nothing to do if the {elements} are not copy-on-write.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = elements;
+ __ Goto(&done, elements);
+ __ Bind(&if_not_fixed_array);
// We need to take a copy of the {elements} and set them up for {object}.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // We need to create a copy of the {elements} for {object}.
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
- vfalse = efalse = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
- jsgraph()->NoContextConstant(), efalse);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
+ __ NoContextConstant());
+ __ Goto(&done, result);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
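// The map comparison above separates writable FixedArray backing stores
// from copy-on-write ones (which carry a different map), so the copy stub
// runs only when a shared store is about to be written. A standalone sketch
// of that decision with plain stand-ins; the real COW map is the factory's
// fixed_cow_array_map.

#include <cassert>

enum class MapKind { kFixedArray, kFixedCOWArray };  // stand-ins

struct ElementsStore { MapKind map; };

ElementsStore EnsureWritable(ElementsStore elements) {
  if (elements.map == MapKind::kFixedArray) return elements;  // writable
  ElementsStore copy = elements;  // stand-in for the copy stub call
  copy.map = MapKind::kFixedArray;
  return copy;
}

int main() {
  assert(EnsureWritable({MapKind::kFixedCOWArray}).map ==
         MapKind::kFixedArray);
  return 0;
}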
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+ Node* frame_state) {
GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
Node* length = node->InputAt(3);
- Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
- ? machine()->Uint32LessThanOrEqual()
- : machine()->Word32Equal(),
- length, index);
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto if_not_grow = __ MakeLabel<1>();
+ auto if_not_grow_backing_store = __ MakeLabel<1>();
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = elements;
+ Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
+ ? __ Uint32LessThanOrEqual(length, index)
+ : __ Word32Equal(length, index);
+ __ GotoUnless(check0, &if_not_grow);
{
// Load the length of the {elements} backing store.
- Node* elements_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
- etrue0, if_true0);
+ Node* elements_length =
+ __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
elements_length = ChangeSmiToInt32(elements_length);
// Check if we need to grow the {elements} backing store.
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1 = vtrue0;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = vtrue0;
- {
- // We need to grow the {elements} for {object}.
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable =
- (flags & GrowFastElementsFlag::kDoubleElements)
- ? CodeFactory::GrowFastDoubleElements(isolate())
- : CodeFactory::GrowFastSmiOrObjectElements(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
- vfalse1 = efalse1 = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
- efalse1);
-
- // Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- efalse1 = if_false1 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
- frame_state, efalse1, if_false1);
- }
+ Node* check1 = __ Uint32LessThan(index, elements_length);
+ __ GotoUnless(check1, &if_not_grow_backing_store);
+ __ Goto(&done_grow, elements);
+
+ __ Bind(&if_not_grow_backing_store);
+ // We need to grow the {elements} for {object}.
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable =
+ (flags & GrowFastElementsFlag::kDoubleElements)
+ ? CodeFactory::GrowFastDoubleElements(isolate())
+ : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
+ properties);
+ Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
+ ChangeInt32ToSmi(index), __ NoContextConstant());
+
+ // Ensure that we were able to grow the {elements}.
+ // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+ // but maybe we should just introduce a reason that makes sense.
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
+ frame_state);
+ __ Goto(&done_grow, new_object);
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&done_grow);
// For JSArray {object}s we also need to update the "length".
if (flags & GrowFastElementsFlag::kArrayObject) {
// Compute the new {length}.
- Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
- machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+ Node* object_length =
+ ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
// Update the "length" property of the {object}.
- etrue0 =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
- object, object_length, etrue0, if_true0);
+ __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+ object_length);
}
+ __ Goto(&done, done_grow.PhiAt(0));
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0 = elements;
+ __ Bind(&if_not_grow);
{
// In case of non-holey {elements}, we need to verify that the {index} is
// in-bounds, otherwise for holey {elements}, the check above already
// guards the index (and the operator forces {index} to be unsigned).
if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), index, length);
- efalse0 = if_false0 = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
- frame_state, efalse0, if_false0);
+ Node* check1 = __ Uint32LessThan(index, length);
+ __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check1, frame_state);
}
+ __ Goto(&done, elements);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
- vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
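// Note how nested merge points become nested labels above: done_grow merges
// the two growth outcomes, and its Phi is then forwarded as one input of the
// outer done label. A condensed sketch of that chaining, restricted to call
// shapes seen in this patch (the method name is hypothetical):

Node* EffectControlLinearizer::ChainedMergeSketch(Node* check1, Node* check2,
                                                  Node* a, Node* b, Node* c) {
  auto inner = __ MakeLabel<2>(MachineRepresentation::kTagged);
  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);

  __ GotoUnless(check1, &done, c);  // Bypass the inner merge entirely.
  __ GotoUnless(check2, &inner, a);
  __ Goto(&inner, b);

  __ Bind(&inner);
  __ Goto(&done, inner.PhiAt(0));  // The inner Phi feeds the outer merge.

  __ Bind(&done);
  return done.PhiAt(0);
}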
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
- Node* control) {
+void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
ElementsTransition const transition = ElementsTransitionOf(node->op());
Node* object = node->InputAt(0);
- Node* source_map = node->InputAt(1);
- Node* target_map = node->InputAt(2);
+
+ auto if_map_same = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>();
+
+ Node* source_map = __ HeapConstant(transition.source());
+ Node* target_map = __ HeapConstant(transition.target());
// Load the current map of {object}.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
- effect, control);
+ Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
// Check if {object_map} is the same as {source_map}.
- Node* check =
- graph()->NewNode(machine()->WordEqual(), object_map, source_map);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- // Migrate the {object} from {source_map} to {target_map}.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- switch (transition) {
- case ElementsTransition::kFastTransition: {
- // In-place migration of {object}, just store the {target_map}.
- etrue =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- object, target_map, etrue, if_true);
- break;
- }
- case ElementsTransition::kSlowTransition: {
- // Instance migration, call out to the runtime for {object}.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- etrue = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
- target_map,
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
- if_true);
- break;
- }
+ Node* check = __ WordEqual(object_map, source_map);
+ __ GotoIf(check, &if_map_same);
+ __ Goto(&done);
+
+ __ Bind(&if_map_same);
+ switch (transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ // In-place migration of {object}, just store the {target_map}.
+ __ StoreField(AccessBuilder::ForMap(), object, target_map);
+ break;
+ case ElementsTransition::kSlowTransition: {
+ // Instance migration, call out to the runtime for {object}.
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ break;
}
}
+ __ Goto(&done);
- // Nothing to do if the {object} doesn't have the {source_map}.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
- return ValueEffectControl(nullptr, effect, control);
+ __ Bind(&done);
}
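// The source and target maps are no longer value inputs here; they ride on
// the ElementsTransition operator parameter. A sketch of the parameter
// shape this lowering assumes, with accessors matching the calls above; the
// actual definition lives in simplified-operator.h, so this is an
// inference, not a quote.

class ElementsTransition final {
 public:
  enum Mode : uint8_t { kFastTransition, kSlowTransition };

  ElementsTransition(Mode mode, Handle<Map> source, Handle<Map> target)
      : mode_(mode), source_(source), target_(target) {}

  Mode mode() const { return mode_; }
  Handle<Map> source() const { return source_; }
  Handle<Map> target() const { return target_; }

 private:
  Mode const mode_;
  Handle<Map> const source_;
  Handle<Map> const target_;
};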
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* base = node->InputAt(1);
@@ -3221,24 +2336,20 @@ EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
- effect = graph()->NewNode(common()->Retain(), buffer, effect);
+ __ Retain(buffer);
- // Compute the effective storage pointer.
- Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
- external, effect, control);
+ // Compute the effective storage pointer, handling the case where the
+ // {external} pointer is the effective storage pointer (i.e. the {base}
+ // is Smi zero).
+ Node* storage = NumberMatcher(base).Is(0) ? external : __ UnsafePointerAdd(
+ base, external);
// Perform the actual typed element access.
- Node* value = effect = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForTypedArrayElement(array_type, true)),
- storage, index, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+ storage, index);
}
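// Off-heap typed arrays pass Smi zero as {base} with the absolute address
// in {external}; on-heap ones pass the object plus an offset. The matcher
// lets the lowering fold the pointer add away when the base is statically
// zero. A standalone sketch of the effective-address computation:

#include <cassert>
#include <cstdint>

uintptr_t EffectiveStorage(uintptr_t base, uintptr_t external) {
  return base == 0 ? external : base + external;
}

int main() {
  assert(EffectiveStorage(0, 0x1000) == 0x1000);     // external store
  assert(EffectiveStorage(0x2000, 0x10) == 0x2010);  // on-heap store
  return 0;
}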
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
- Node* control) {
+void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* base = node->InputAt(1);
@@ -3248,34 +2359,25 @@ EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
- effect = graph()->NewNode(common()->Retain(), buffer, effect);
+ __ Retain(buffer);
- // Compute the effective storage pointer.
- Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
- external, effect, control);
+ // Compute the effective storage pointer, handling the case where the
+ // {external} pointer is the effective storage pointer (i.e. the {base}
+ // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
- effect = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForTypedArrayElement(array_type, true)),
- storage, index, value, effect, control);
-
- return ValueEffectControl(nullptr, effect, control);
+ __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+ storage, index, value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
Node* const input = node->InputAt(0);
// General case for ceil.
@@ -3300,251 +2402,169 @@ EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
// let temp2 = (2^52 + temp1) - 2^52 in
// let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
// -0 - temp3
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+ auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+ Node* const one = __ Float64Constant(1.0);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, input),
- graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(temp1, input), &done, temp1);
+ __ Goto(&done, __ Float64Add(temp1, one));
}
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ __ Bind(&if_not_positive);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
{
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp1, temp2);
+ __ GotoUnless(check3, &done_temp3, temp2);
+ __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+ __ Bind(&done_temp3);
+ Node* temp3 = done_temp3.PhiAt(0);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp3));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
- return ValueEffectControl(value, effect, merge0);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
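
Aside: every Float64Round slow path in this file leans on the same 2^52 trick, so a standalone sketch may help when reading these hunks. This is plain C++, not part of the patch, for the ceil (RoundUp) case ending above, judging by its adjust-upwards arithmetic; it assumes the default round-to-nearest FP mode, and the |x| >= 2^52 and x == 0 guards mirror the lowering's branches.

    #include <cstdio>

    double CeilViaTwo52(double x) {
      const double two52 = 4503599627370496.0;   // 2^52
      if (x > 0.0) {
        if (x >= two52) return x;                // already integral
        double t = (two52 + x) - two52;          // x rounded to nearest integer
        return t < x ? t + 1.0 : t;              // rounded down? step up
      }
      if (x == 0.0 || x <= -two52) return x;     // keeps -0.0 and big values
      double t1 = -0.0 - x;                      // work on the magnitude
      double t2 = (two52 + t1) - two52;          // magnitude rounded to integer
      if (t1 < t2) t2 -= 1.0;                    // rounded up? step back
      return -0.0 - t2;                          // restore sign, keeps -0.0
    }

    int main() {
      printf("%g %g %g\n", CeilViaTwo52(1.2), CeilViaTwo52(-1.2),
             CeilViaTwo52(-0.5));                // 2 -1 -0
    }
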
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
- Node* control) {
- if (machine()->Float64RoundDown().IsSupported()) {
- value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
- } else {
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_one = jsgraph()->Float64Constant(-1.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 =
- jsgraph()->Float64Constant(-4503599627370496.0E0);
- Node* const input = value;
-
- // General case for floor.
- //
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if input < temp1 then
- // temp1 - 1
- // else
- // temp1
- // else
- // if input == 0 then
- // input
- // else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // if temp2 < temp1 then
- // -1 - temp2
- // else
- // -0 - temp2
- //
- // Note: We do not use the Diamond helper class here, because it really
- // hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
- }
+Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
+ Node* round_down = __ Float64RoundDown(value);
+ if (round_down != nullptr) {
+ return round_down;
+ }
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
+ Node* const input = value;
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ // General case for floor.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // if temp2 < temp1 then
+ // -1 - temp2
+ // else
+ // -0 - temp2
+
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_temp2_lt_temp1 = __ MakeLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<7>(MachineRepresentation::kFloat64);
+
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* const one = __ Float64Constant(1.0);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+ __ Goto(&done, __ Float64Sub(temp1, one));
+ }
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
+ }
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- vfalse2 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
- graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
- graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
- }
+ __ Bind(&if_not_positive);
+ {
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ {
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp2, temp1);
+ __ GotoIf(check3, &if_temp2_lt_temp1);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp2));
+
+ __ Bind(&if_temp2_lt_temp1);
+ __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundDown().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
Node* const input = node->InputAt(0);
- return BuildFloat64RoundDown(input, effect, control);
+ return Just(BuildFloat64RoundDown(input));
}
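
The lowering entry points now return Maybe<Node*>: Nothing<Node*>() means "the node stays as-is, hardware has a fast instruction", and Just(replacement) means "wire this in instead". A toy, self-contained model of that contract, with std::optional standing in for v8's Maybe and all names invented:

    #include <cstdio>
    #include <optional>

    std::optional<int> Lower(int node, bool hw_supported) {
      if (hw_supported) return std::nullopt;  // Nothing<Node*>(): keep node
      return node + 1;                        // Just(replacement)
    }

    int main() {
      std::optional<int> r = Lower(7, /*hw_supported=*/false);
      printf("%d\n", r ? *r : 7);             // 8: the replacement is used
    }
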
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundTiesEven().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const two = jsgraph()->Float64Constant(2.0);
- Node* const half = jsgraph()->Float64Constant(0.5);
- Node* const zero = jsgraph()->Float64Constant(0.0);
Node* const input = node->InputAt(0);
// Generate case for round ties to even:
@@ -3561,79 +2581,38 @@ EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
// value
// else
// value + 1.0
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- ValueEffectControl continuation =
- BuildFloat64RoundDown(input, effect, control);
- Node* value = continuation.value;
- effect = continuation.effect;
- control = continuation.control;
- Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
+ auto if_is_half = __ MakeLabel<1>();
+ auto done = __ MakeLabel<4>(MachineRepresentation::kFloat64);
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+ Node* value = BuildFloat64RoundDown(input);
+ Node* temp1 = __ Float64Sub(input, value);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0 = value;
+ Node* const half = __ Float64Constant(0.5);
+ Node* check0 = __ Float64LessThan(temp1, half);
+ __ GotoIf(check0, &done, value);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
-
- Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = value;
+ Node* const one = __ Float64Constant(1.0);
+ Node* check1 = __ Float64LessThan(half, temp1);
+ __ GotoUnless(check1, &if_is_half);
+ __ Goto(&done, __ Float64Add(value, one));
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
+ __ Bind(&if_is_half);
+ Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
+ Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
+ __ GotoIf(check2, &done, value);
+ __ Goto(&done, __ Float64Add(value, one));
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
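
A standalone sketch of the tie-to-even selection built above on top of BuildFloat64RoundDown (plain C++, not part of the patch): fractions below 0.5 round down, above 0.5 round up, and an exact 0.5 picks the even neighbour via floor(value) mod 2.

    #include <cmath>
    #include <cstdio>

    double RoundTiesEven(double x) {
      double f = std::floor(x);                 // BuildFloat64RoundDown's job
      double frac = x - f;
      if (frac < 0.5) return f;
      if (frac > 0.5) return f + 1.0;
      return std::fmod(f, 2.0) == 0.0 ? f : f + 1.0;  // exact tie: go even
    }

    int main() {
      printf("%g %g %g\n", RoundTiesEven(0.5), RoundTiesEven(1.5),
             RoundTiesEven(2.5));               // 0 2 2
    }
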
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundTruncate().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
Node* const input = node->InputAt(0);
// General case for trunc.
@@ -3662,92 +2641,65 @@ EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+ auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+ Node* const one = __ Float64Constant(1.0);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+ __ Goto(&done, __ Float64Sub(temp1, one));
}
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ __ Bind(&if_not_positive);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
{
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp1, temp2);
+ __ GotoUnless(check3, &done_temp3, temp2);
+ __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+ __ Bind(&done_temp3);
+ Node* temp3 = done_temp3.PhiAt(0);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp3));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
- return ValueEffectControl(value, effect, merge0);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
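
Same trick, opposite adjustment, for the trunc slow path above (standalone sketch, not part of the patch): the positive half floors, the negative half rounds the magnitude down and restores the sign, so the result always moves toward zero.

    double TruncViaTwo52(double x) {
      const double two52 = 4503599627370496.0;
      if (x > 0.0) {
        if (x >= two52) return x;
        double t = (two52 + x) - two52;         // nearest integer
        return x < t ? t - 1.0 : t;             // rounded up? step down (floor)
      }
      if (x == 0.0 || x <= -two52) return x;
      double t1 = -0.0 - x;                     // magnitude
      double t2 = (two52 + t1) - two52;
      if (t1 < t2) t2 -= 1.0;                   // magnitude rounded up? step back
      return -0.0 - t2;                         // toward zero overall
    }

    int main() { return TruncViaTwo52(-1.7) == -1.0 ? 0 : 1; }
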
+#undef __
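
For readers of the hunks above: the __ shorthand undefined here is the usual V8 assembler macro. It is presumably defined near the top of this file, outside the hunks shown, as the linearizer's accessor for its new GraphAssembler, along the lines of:

    #define __ gasm()->
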
+
Factory* EffectControlLinearizer::factory() const {
return isolate()->factory();
}
@@ -3756,18 +2708,6 @@ Isolate* EffectControlLinearizer::isolate() const {
return jsgraph()->isolate();
}
-Operator const* EffectControlLinearizer::ToNumberOperator() {
- if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- Operator::kEliminatable);
- to_number_operator_.set(common()->Call(desc));
- }
- return to_number_operator_.get();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 4ed03c6815..9d991cfb4b 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/globals.h"
@@ -38,174 +39,90 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void ProcessNode(Node* node, Node** frame_state, Node** effect,
Node** control);
- struct ValueEffectControl {
- Node* value;
- Node* effect;
- Node* control;
- ValueEffectControl(Node* value, Node* effect, Node* control)
- : value(value), effect(effect), control(control) {}
- };
-
bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
Node** control);
- ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
- ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
- ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
- Node* control);
+ Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToTaggedSigned(Node* node);
+ Node* LowerChangeInt32ToTagged(Node* node);
+ Node* LowerChangeUint32ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeTaggedSignedToInt32(Node* node);
+ Node* LowerChangeTaggedToBit(Node* node);
+ Node* LowerChangeTaggedToInt32(Node* node);
+ Node* LowerChangeTaggedToUint32(Node* node);
+ Node* LowerCheckBounds(Node* node, Node* frame_state);
+ Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
+ Node* LowerCheckMaps(Node* node, Node* frame_state);
+ Node* LowerCheckNumber(Node* node, Node* frame_state);
+ Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckIf(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerChangeTaggedToFloat64(Node* node);
+ Node* LowerTruncateTaggedToBit(Node* node);
+ Node* LowerTruncateTaggedToFloat64(Node* node);
+ Node* LowerTruncateTaggedToWord32(Node* node);
+ Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+ Node* LowerObjectIsCallable(Node* node);
+ Node* LowerObjectIsNumber(Node* node);
+ Node* LowerObjectIsReceiver(Node* node);
+ Node* LowerObjectIsSmi(Node* node);
+ Node* LowerObjectIsString(Node* node);
+ Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNewRestParameterElements(Node* node);
+ Node* LowerNewUnmappedArgumentsElements(Node* node);
+ Node* LowerArrayBufferWasNeutered(Node* node);
+ Node* LowerStringCharAt(Node* node);
+ Node* LowerStringCharCodeAt(Node* node);
+ Node* LowerStringFromCharCode(Node* node);
+ Node* LowerStringFromCodePoint(Node* node);
+ Node* LowerStringEqual(Node* node);
+ Node* LowerStringLessThan(Node* node);
+ Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
+ Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+ Node* LowerConvertTaggedHoleToUndefined(Node* node);
+ Node* LowerPlainPrimitiveToNumber(Node* node);
+ Node* LowerPlainPrimitiveToWord32(Node* node);
+ Node* LowerPlainPrimitiveToFloat64(Node* node);
+ Node* LowerEnsureWritableFastElements(Node* node);
+ Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
+ void LowerTransitionElementsKind(Node* node);
+ Node* LowerLoadTypedElement(Node* node);
+ void LowerStoreTypedElement(Node* node);
// Lowering of optional operators.
- ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
- Node* control);
-
- ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
- Node* control);
- ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
- Node* value, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
- Node* control);
- ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
- Node* effect, Node* control);
+ Maybe<Node*> LowerFloat64RoundUp(Node* node);
+ Maybe<Node*> LowerFloat64RoundDown(Node* node);
+ Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
+ Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
+
+ Node* AllocateHeapNumberWithValue(Node* node);
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+ Node* frame_state);
+ Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ Node* value,
+ Node* frame_state);
+ Node* BuildFloat64RoundDown(Node* value);
+ Node* LowerStringComparison(Callable const& callable, Node* node);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
- Node* ChangeInt32ToFloat64(Node* value);
- Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
@@ -222,15 +139,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
- Operator const* ToNumberOperator();
+ GraphAssembler* gasm() { return &graph_assembler_; }
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
-
- SetOncePointer<Operator const> to_number_operator_;
+ GraphAssembler graph_assembler_;
};
} // namespace compiler
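
The shape of this header change: lowering helpers no longer take and return (value, effect, control) triples, because the new GraphAssembler member threads the effect and control chains implicitly. A toy, self-contained model of why that shrinks every signature (all names invented):

    #include <cstdio>

    struct Asm {                 // stands in for GraphAssembler
      int effect = 0;            // current_effect_, advanced by effectful ops
      int LoadField() { return ++effect; }
    };

    int Lower(Asm* gasm) {       // was: Lower(node, effect, control)
      gasm->LoadField();
      return gasm->LoadField();  // no manual effect threading needed
    }

    int main() {
      Asm gasm;
      printf("%d\n", Lower(&gasm));  // 2: the chain advanced twice, implicitly
    }
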
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index f7708f85da..10b7f285a6 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -31,7 +31,7 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
-Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
fully_reduced_.Contains(node->id())) {
return NoChange();
@@ -61,8 +61,7 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
break;
}
bool depends_on_object_state = false;
- for (int i = 0; i < node->InputCount(); i++) {
- Node* input = node->InputAt(i);
+ for (Node* input : node->inputs()) {
switch (input->opcode()) {
case IrOpcode::kAllocate:
case IrOpcode::kFinishRegion:
@@ -97,9 +96,18 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
return NoChange();
}
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ Reduction reduction = ReduceNode(node);
+ if (reduction.Changed() && node != reduction.replacement()) {
+ escape_analysis()->SetReplacement(node, reduction.replacement());
+ }
+ return reduction;
+}
+
namespace {
-Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+Node* MaybeGuard(JSGraph* jsgraph, Zone* zone, Node* original,
+ Node* replacement) {
// We might need to guard the replacement if the type of the {replacement}
// node is not in a sub-type relation to the type of the {original} node.
Type* const replacement_type = NodeProperties::GetType(replacement);
@@ -108,10 +116,18 @@ Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
Node* const control = NodeProperties::GetControlInput(original);
replacement = jsgraph->graph()->NewNode(
jsgraph->common()->TypeGuard(original_type), replacement, control);
+ NodeProperties::SetType(replacement, original_type);
}
return replacement;
}
+Node* SkipTypeGuards(Node* node) {
+ while (node->opcode() == IrOpcode::kTypeGuard) {
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ return node;
+}
+
} // namespace
Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
@@ -120,12 +136,13 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
fully_reduced_.Add(node->id());
}
- if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (escape_analysis()->IsVirtual(
+ SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
if (Node* rep = escape_analysis()->GetReplacement(node)) {
isolate()->counters()->turbo_escape_loads_replaced()->Increment();
TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
- rep = MaybeGuard(jsgraph(), node, rep);
+ rep = MaybeGuard(jsgraph(), zone(), node, rep);
ReplaceWithValue(node, rep);
return Replace(rep);
}
@@ -140,7 +157,8 @@ Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
fully_reduced_.Add(node->id());
}
- if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (escape_analysis()->IsVirtual(
+ SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
TRACE("Removed #%d (%s) from effect chain\n", node->id(),
node->op()->mnemonic());
RelaxEffectsAndControls(node);
@@ -195,14 +213,14 @@ Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
- Node* left = NodeProperties::GetValueInput(node, 0);
- Node* right = NodeProperties::GetValueInput(node, 1);
+ Node* left = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
+ Node* right = SkipTypeGuards(NodeProperties::GetValueInput(node, 1));
if (escape_analysis()->IsVirtual(left)) {
if (escape_analysis()->IsVirtual(right) &&
escape_analysis()->CompareVirtualObjects(left, right)) {
ReplaceWithValue(node, jsgraph()->TrueConstant());
TRACE("Replaced ref eq #%d with true\n", node->id());
- Replace(jsgraph()->TrueConstant());
+ return Replace(jsgraph()->TrueConstant());
}
// Right-hand side is not a virtual object, or a different one.
ReplaceWithValue(node, jsgraph()->FalseConstant());
@@ -220,7 +238,7 @@ Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
- Node* input = NodeProperties::GetValueInput(node, 0);
+ Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
if (escape_analysis()->IsVirtual(input)) {
ReplaceWithValue(node, jsgraph()->FalseConstant());
TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
@@ -313,7 +331,7 @@ Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
bool node_multiused,
bool already_cloned,
bool multiple_users) {
- Node* input = NodeProperties::GetValueInput(node, node_index);
+ Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, node_index));
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
fully_reduced_.Contains(node->id())) {
return nullptr;
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 61e7607a36..746d84030e 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -33,6 +33,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
bool compilation_failed() const { return compilation_failed_; }
private:
+ Reduction ReduceNode(Node* node);
Reduction ReduceLoad(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceAllocate(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 0218045971..52c7e74c10 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -12,6 +12,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -201,7 +202,7 @@ class VirtualObject : public ZoneObject {
}
bool UpdateFrom(const VirtualObject& other);
bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common);
+ CommonOperatorBuilder* common, bool initialMerge);
void SetObjectState(Node* node) { object_state_ = node; }
Node* GetObjectState() const { return object_state_; }
bool IsCopyRequired() const { return status_ & kCopyRequired; }
@@ -252,10 +253,14 @@ bool VirtualObject::UpdateFrom(const VirtualObject& other) {
class VirtualState : public ZoneObject {
public:
VirtualState(Node* owner, Zone* zone, size_t size)
- : info_(size, nullptr, zone), owner_(owner) {}
+ : info_(size, nullptr, zone),
+ initialized_(static_cast<int>(size), zone),
+ owner_(owner) {}
VirtualState(Node* owner, const VirtualState& state)
: info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ initialized_(state.initialized_.length(),
+ state.info_.get_allocator().zone()),
owner_(owner) {
for (size_t i = 0; i < info_.size(); ++i) {
if (state.info_[i]) {
@@ -280,6 +285,7 @@ class VirtualState : public ZoneObject {
private:
ZoneVector<VirtualObject*> info_;
+ BitVector initialized_;
Node* owner_;
DISALLOW_COPY_AND_ASSIGN(VirtualState);
@@ -375,6 +381,7 @@ VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
info_[alias] = obj;
+ if (obj) initialized_.Add(alias);
}
bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
@@ -431,7 +438,6 @@ bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
}
return true;
}
-
} // namespace
bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -440,12 +446,21 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
int value_input_count = static_cast<int>(cache->fields().size());
Node* rep = GetField(i);
if (!rep || !IsCreatedPhi(i)) {
+ Type* phi_type = Type::None();
+ for (Node* input : cache->fields()) {
+ CHECK_NOT_NULL(input);
+ CHECK(!input->IsDead());
+ Type* input_type = NodeProperties::GetType(input);
+ phi_type = Type::Union(phi_type, input_type, graph->zone());
+ }
Node* control = NodeProperties::GetControlInput(at);
cache->fields().push_back(control);
Node* phi = graph->NewNode(
common->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache->fields().front());
+ NodeProperties::SetType(phi, phi_type);
SetField(i, phi, true);
+
#ifdef DEBUG
if (FLAG_trace_turbo_escape) {
PrintF(" Creating Phi #%d as merge of", phi->id());
@@ -471,12 +486,15 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
}
bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common) {
+ CommonOperatorBuilder* common,
+ bool initialMerge) {
DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
at->opcode() == IrOpcode::kPhi);
bool changed = false;
for (size_t i = 0; i < field_count(); ++i) {
- if (Node* field = cache->GetFields(i)) {
+ if (!initialMerge && GetField(i) == nullptr) continue;
+ Node* field = cache->GetFields(i);
+ if (field && !IsCreatedPhi(i)) {
changed = changed || GetField(i) != field;
SetField(i, field);
TRACE(" Field %zu agree on rep #%d\n", i, field->id());
@@ -516,8 +534,11 @@ bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
fields = std::min(obj->field_count(), fields);
}
}
- if (cache->objects().size() == cache->states().size()) {
+ if (cache->objects().size() == cache->states().size() &&
+ (mergeObject || !initialized_.Contains(alias))) {
+ bool initialMerge = false;
if (!mergeObject) {
+ initialMerge = true;
VirtualObject* obj = new (zone)
VirtualObject(cache->objects().front()->id(), this, zone, fields,
cache->objects().front()->IsInitialized());
@@ -542,7 +563,9 @@ bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
PrintF("\n");
}
#endif // DEBUG
- changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
+ changed =
+ mergeObject->MergeFrom(cache, at, graph, common, initialMerge) ||
+ changed;
} else {
if (mergeObject) {
TRACE(" Alias %d, virtual object removed\n", alias);
@@ -795,6 +818,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kSelect:
// TODO(mstarzinger): The following list of operators will eventually be
// handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+ case IrOpcode::kConvertTaggedHoleToUndefined:
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -802,6 +826,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kPlainPrimitiveToNumber:
case IrOpcode::kPlainPrimitiveToWord32:
case IrOpcode::kPlainPrimitiveToFloat64:
+ case IrOpcode::kStringCharAt:
case IrOpcode::kStringCharCodeAt:
case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
@@ -863,7 +888,11 @@ EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
virtual_states_(zone),
replacements_(zone),
cycle_detection_(zone),
- cache_(nullptr) {}
+ cache_(nullptr) {
+ // Type slot_not_analyzed_ manually.
+ double v = OpParameter<double>(slot_not_analyzed_);
+ NodeProperties::SetType(slot_not_analyzed_, Type::Range(v, v, zone));
+}
EscapeAnalysis::~EscapeAnalysis() {}
@@ -966,6 +995,7 @@ void EscapeAnalysis::RunObjectAnalysis() {
// VirtualObjects, and we want to delay phis to improve performance.
if (use->opcode() == IrOpcode::kEffectPhi) {
if (!status_analysis_->IsInQueue(use->id())) {
+ status_analysis_->SetInQueue(use->id(), true);
queue.push_front(use);
}
} else if ((use->opcode() != IrOpcode::kLoadField &&
@@ -1044,6 +1074,19 @@ bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
return false;
}
+namespace {
+
+bool HasFrameStateInput(const Operator* op) {
+ if (op->opcode() == IrOpcode::kCall || op->opcode() == IrOpcode::kTailCall) {
+ const CallDescriptor* d = CallDescriptorOf(op);
+ return d->NeedsFrameState();
+ } else {
+ return OperatorProperties::HasFrameStateInput(op);
+ }
+}
+
+} // namespace
+
bool EscapeAnalysis::Process(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
@@ -1080,6 +1123,9 @@ bool EscapeAnalysis::Process(Node* node) {
ProcessAllocationUsers(node);
break;
}
+ if (HasFrameStateInput(node->op())) {
+ virtual_states_[node->id()]->SetCopyRequired();
+ }
return true;
}
@@ -1173,8 +1219,7 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
static_cast<void*>(virtual_states_[effect->id()]),
effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
node->id());
- if (status_analysis_->IsEffectBranchPoint(effect) ||
- OperatorProperties::HasFrameStateInput(node->op())) {
+ if (status_analysis_->IsEffectBranchPoint(effect)) {
virtual_states_[node->id()]->SetCopyRequired();
TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
effect->id());
@@ -1393,10 +1438,16 @@ void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
Node* rep = replacement(load);
if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
int value_input_count = static_cast<int>(cache_->fields().size());
+ Type* phi_type = Type::None();
+ for (Node* input : cache_->fields()) {
+ Type* input_type = NodeProperties::GetType(input);
+ phi_type = Type::Union(phi_type, input_type, graph()->zone());
+ }
cache_->fields().push_back(NodeProperties::GetControlInput(from));
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache_->fields().front());
+ NodeProperties::SetType(phi, phi_type);
status_analysis_->ResizeStatusVector();
SetReplacement(load, phi);
TRACE(" got phi created.\n");
@@ -1583,13 +1634,14 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
cache_->fields().clear();
for (size_t i = 0; i < vobj->field_count(); ++i) {
if (Node* field = vobj->GetField(i)) {
- cache_->fields().push_back(field);
+ cache_->fields().push_back(ResolveReplacement(field));
}
}
int input_count = static_cast<int>(cache_->fields().size());
Node* new_object_state =
graph()->NewNode(common()->ObjectState(input_count), input_count,
&cache_->fields().front());
+ NodeProperties::SetType(new_object_state, Type::OtherInternal());
vobj->SetObjectState(new_object_state);
TRACE(
"Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index b85efe7349..34960dde83 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -35,6 +35,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
Node* GetOrCreateObjectState(Node* effect, Node* node);
bool IsCyclicObjectState(Node* effect, Node* node);
bool ExistsVirtualAllocate();
+ bool SetReplacement(Node* node, Node* rep);
private:
void RunObjectAnalysis();
@@ -59,7 +60,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
Node* replacement(Node* node);
Node* ResolveReplacement(Node* node);
- bool SetReplacement(Node* node, Node* rep);
bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index bb17d1215f..dd8db83dd5 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -114,13 +114,36 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
}
}
- // Propagate towards start ("upwards") if there are successors and all of
- // them need a frame.
- for (RpoNumber& succ : block->successors()) {
- if (!InstructionBlockAt(succ)->needs_frame()) return false;
+ // Propagate towards start ("upwards")
+ bool need_frame_successors = false;
+ if (block->SuccessorCount() == 1) {
+ // For single successors, propagate the needs_frame information.
+ need_frame_successors =
+ InstructionBlockAt(block->successors()[0])->needs_frame();
+ } else {
+ // For multiple successors, each successor has exactly one predecessor
+ // (because the graph is in edge-split form), so each successor
+ // can independently create/dismantle a frame if needed. Given this
+ // independent control, only propagate needs_frame if all non-deferred
+ // blocks need a frame.
+ for (RpoNumber& succ : block->successors()) {
+ InstructionBlock* successor_block = InstructionBlockAt(succ);
+ DCHECK_EQ(1, successor_block->PredecessorCount());
+ if (!successor_block->IsDeferred()) {
+ if (successor_block->needs_frame()) {
+ need_frame_successors = true;
+ } else {
+ return false;
+ }
+ }
+ }
+ }
+ if (need_frame_successors) {
+ block->mark_needs_frame();
+ return true;
+ } else {
+ return false;
}
- block->mark_needs_frame();
- return true;
}
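
A toy, self-contained model of the new upward rule (Block is invented, standing in for InstructionBlock): deferred successors no longer veto propagation, since in edge-split form each successor has exactly one predecessor and a deferred block can build its own frame on entry.

    #include <cstdio>
    #include <vector>

    struct Block { bool needs_frame; bool deferred; };

    bool PropagateUpwards(const std::vector<Block>& succs) {
      if (succs.size() == 1) return succs[0].needs_frame;
      bool need = false;
      for (const Block& s : succs) {
        if (s.deferred) continue;          // may create/dismantle its own frame
        if (!s.needs_frame) return false;  // a frameless hot successor vetoes
        need = true;
      }
      return need;
    }

    int main() {
      // One deferred frameless successor plus one hot successor with a frame:
      // the old code bailed out here, the new code propagates needs_frame.
      printf("%d\n", PropagateUpwards({{false, true}, {true, false}}));  // 1
    }
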
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index a02fb0121c..ec014dac94 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -6,6 +6,7 @@
#include "src/base/functional.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 8d463dfb78..a4d6829cfa 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -113,9 +113,9 @@ class Frame : public ZoneObject {
int AllocateSpillSlot(int width) {
int frame_slot_count_before = frame_slot_count_;
- int slot = AllocateAlignedFrameSlot(width);
- spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
- return slot;
+ AllocateAlignedFrameSlots(width);
+ spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
+ return frame_slot_count_ - 1;
}
int AlignFrame(int alignment = kDoubleSize);
@@ -131,23 +131,15 @@ class Frame : public ZoneObject {
static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
private:
- int AllocateAlignedFrameSlot(int width) {
- DCHECK(width == 4 || width == 8 || width == 16);
- if (kPointerSize == 4) {
- // Skip one slot if necessary.
- if (width > kPointerSize) {
- frame_slot_count_++;
- frame_slot_count_ |= 1;
- // 2 extra slots if width == 16.
- frame_slot_count_ += (width & 16) / 8;
- }
- } else {
- // No alignment when slots are 8 bytes.
- DCHECK_EQ(8, kPointerSize);
- // 1 extra slot if width == 16.
- frame_slot_count_ += (width & 16) / 16;
- }
- return frame_slot_count_++;
+ void AllocateAlignedFrameSlots(int width) {
+ DCHECK_LT(0, width);
+ int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
+ // Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes if
+ // multiple of 16.
+ int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
+ frame_slot_count_ =
+ RoundUp(frame_slot_count_ + new_frame_slots, align_to / kPointerSize);
+ DCHECK_LT(0, frame_slot_count_);
}
private:
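
A worked example of the new slot math (self-contained, constants inlined): on a 64-bit target, allocating a 16-byte spill slot into a frame that already holds 3 slots.

    #include <cstdio>

    int RoundUp(int x, int m) { return (x + m - 1) / m * m; }

    int main() {
      const int kPointerSize = 8;                  // 64-bit target
      int frame_slot_count = 3;                    // slots already in the frame
      int width = 16;                              // e.g. a 128-bit spill
      int new_slots = (width + kPointerSize - 1) / kPointerSize;           // 2
      int align_to =
          (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;    // 16
      frame_slot_count =
          RoundUp(frame_slot_count + new_slots, align_to / kPointerSize);  // 6
      // AllocateSpillSlot() then returns frame_slot_count_ - 1 = 5, a slot
      // index aligned for the 16-byte value.
      printf("count=%d slot=%d\n", frame_slot_count, frame_slot_count - 1);
    }
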
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
new file mode 100644
index 0000000000..235826e746
--- /dev/null
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -0,0 +1,287 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphAssembler::GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control,
+ Zone* zone)
+ : temp_zone_(zone),
+ jsgraph_(jsgraph),
+ current_effect_(effect),
+ current_control_(control) {}
+
+Node* GraphAssembler::IntPtrConstant(intptr_t value) {
+ return jsgraph()->IntPtrConstant(value);
+}
+
+Node* GraphAssembler::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+}
+
+Node* GraphAssembler::SmiConstant(int32_t value) {
+ return jsgraph()->SmiConstant(value);
+}
+
+Node* GraphAssembler::Uint32Constant(int32_t value) {
+ return jsgraph()->Uint32Constant(value);
+}
+
+Node* GraphAssembler::Float64Constant(double value) {
+ return jsgraph()->Float64Constant(value);
+}
+
+Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
+ return jsgraph()->HeapConstant(object);
+}
+
+
+Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
+ return jsgraph()->ExternalConstant(ref);
+}
+
+Node* GraphAssembler::CEntryStubConstant(int result_size) {
+ return jsgraph()->CEntryStubConstant(result_size);
+}
+
+#define SINGLETON_CONST_DEF(Name) \
+ Node* GraphAssembler::Name() { return jsgraph()->Name(); }
+JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DEF)
+#undef SINGLETON_CONST_DEF
+
+#define PURE_UNOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* input) { \
+ return graph()->NewNode(machine()->Name(), input); \
+ }
+PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DEF)
+#undef PURE_UNOP_DEF
+
+#define PURE_BINOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* left, Node* right) { \
+ return graph()->NewNode(machine()->Name(), left, right); \
+ }
+PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
+#undef PURE_BINOP_DEF
+
+#define CHECKED_BINOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* left, Node* right) { \
+ return graph()->NewNode(machine()->Name(), left, right, current_control_); \
+ }
+CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
+#undef CHECKED_BINOP_DEF
+
+Node* GraphAssembler::Float64RoundDown(Node* value) {
+ if (machine()->Float64RoundDown().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundDown().op(), value);
+ }
+ return nullptr;
+}
+
+Node* GraphAssembler::Projection(int index, Node* value) {
+ return graph()->NewNode(common()->Projection(index), value, current_control_);
+}
+
+Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED), size,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->LoadField(access), object,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadElement(ElementAccess const& access, Node* object,
+ Node* index) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->LoadElement(access), object, index,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
+ Node* value) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->StoreField(access), object, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreElement(ElementAccess const& access, Node* object,
+ Node* index, Node* value) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->StoreElement(access), object, index,
+ value, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
+ Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->Store(rep), object, offset, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
+ return current_effect_ =
+ graph()->NewNode(machine()->Load(rep), object, offset,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Retain(Node* buffer) {
+ return current_effect_ =
+ graph()->NewNode(common()->Retain(), buffer, current_effect_);
+}
+
+Node* GraphAssembler::UnsafePointerAdd(Node* base, Node* external) {
+ return current_effect_ =
+ graph()->NewNode(machine()->UnsafePointerAdd(), base, external,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::ToNumber(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(ToNumberOperator(), ToNumberBuiltinConstant(),
+ value, NoContextConstant(), current_effect_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+ Node* frame_state) {
+ return current_control_ = current_effect_ =
+ graph()->NewNode(common()->DeoptimizeIf(reason), condition,
+ frame_state, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+ Node* frame_state) {
+ return current_control_ = current_effect_ =
+ graph()->NewNode(common()->DeoptimizeUnless(reason), condition,
+ frame_state, current_effect_, current_control_);
+}
+
+void GraphAssembler::Branch(Node* condition,
+ GraphAssemblerStaticLabel<1>* if_true,
+ GraphAssemblerStaticLabel<1>* if_false) {
+ DCHECK_NOT_NULL(current_control_);
+
+ BranchHint hint = BranchHint::kNone;
+ if (if_true->IsDeferred() != if_false->IsDeferred()) {
+ hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
+ }
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+ MergeState(if_true);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+ MergeState(if_false);
+
+ current_control_ = nullptr;
+ current_effect_ = nullptr;
+}
+
+// Extractors (should only be used when destructing the assembler).
+Node* GraphAssembler::ExtractCurrentControl() {
+ Node* result = current_control_;
+ current_control_ = nullptr;
+ return result;
+}
+
+Node* GraphAssembler::ExtractCurrentEffect() {
+ Node* result = current_effect_;
+ current_effect_ = nullptr;
+ return result;
+}
+
+void GraphAssembler::Reset(Node* effect, Node* control) {
+ current_effect_ = effect;
+ current_control_ = control;
+}
+
+Operator const* GraphAssembler::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kEliminatable);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
+}
+
+Node* GraphAssemblerLabel::PhiAt(size_t index) {
+ DCHECK(IsBound());
+ return GetBindingsPtrFor(index)[0];
+}
+
+GraphAssemblerLabel::GraphAssemblerLabel(GraphAssemblerLabelType is_deferred,
+ size_t merge_count, size_t var_count,
+ MachineRepresentation* representations,
+ Zone* zone)
+ : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred),
+ max_merge_count_(merge_count),
+ var_count_(var_count) {
+ effects_ = zone->NewArray<Node*>(MaxMergeCount() + 1);
+ for (size_t i = 0; i < MaxMergeCount() + 1; i++) {
+ effects_[i] = nullptr;
+ }
+
+ controls_ = zone->NewArray<Node*>(MaxMergeCount());
+ for (size_t i = 0; i < MaxMergeCount(); i++) {
+ controls_[i] = nullptr;
+ }
+
+ size_t num_bindings = (MaxMergeCount() + 1) * PhiCount() + 1;
+ bindings_ = zone->NewArray<Node*>(num_bindings);
+ for (size_t i = 0; i < num_bindings; i++) {
+ bindings_[i] = nullptr;
+ }
+
+ representations_ = zone->NewArray<MachineRepresentation>(PhiCount() + 1);
+ for (size_t i = 0; i < PhiCount(); i++) {
+ representations_[i] = representations[i];
+ }
+}
+
+GraphAssemblerLabel::~GraphAssemblerLabel() {
+ DCHECK(IsBound() || MergedCount() == 0);
+}
+
+Node** GraphAssemblerLabel::GetBindingsPtrFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return &bindings_[phi_index * (MaxMergeCount() + 1)];
+}
+
+void GraphAssemblerLabel::SetBinding(size_t phi_index, size_t merge_index,
+ Node* binding) {
+ DCHECK_LT(phi_index, PhiCount());
+ DCHECK_LT(merge_index, MaxMergeCount());
+ bindings_[phi_index * (MaxMergeCount() + 1) + merge_index] = binding;
+}
+
+MachineRepresentation GraphAssemblerLabel::GetRepresentationFor(
+ size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return representations_[phi_index];
+}
+
+Node** GraphAssemblerLabel::GetControlsPtr() { return controls_; }
+
+Node** GraphAssemblerLabel::GetEffectsPtr() { return effects_; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
new file mode 100644
index 0000000000..61f8f5b61d
--- /dev/null
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -0,0 +1,449 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
+#define V8_COMPILER_GRAPH_ASSEMBLER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGraph;
+class Graph;
+
+namespace compiler {
+
+#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(TruncateFloat64ToWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64Abs) \
+ V(BitcastWordToTagged)
+
+#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
+ V(WordShl) \
+ V(WordSar) \
+ V(WordAnd) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Shr) \
+ V(Word32Shl) \
+ V(IntAdd) \
+ V(IntSub) \
+ V(UintLessThan) \
+ V(Int32Add) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Int32LessThan) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mod) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual) \
+ V(Word32Equal) \
+ V(WordEqual)
+
+#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
+ V(Int32AddWithOverflow) \
+ V(Int32SubWithOverflow) \
+ V(Int32MulWithOverflow) \
+ V(Int32Mod) \
+ V(Int32Div) \
+ V(Uint32Mod) \
+ V(Uint32Div)
+
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(HeapNumberMapConstant) \
+ V(NoContextConstant) \
+ V(EmptyStringConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(FixedArrayMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(AllocateInNewSpaceStubConstant) \
+ V(AllocateInOldSpaceStubConstant)
+
+class GraphAssembler;
+
+enum class GraphAssemblerLabelType { kDeferred, kNonDeferred };
+
+// Label with statically known count of incoming branches and phis.
+template <size_t MergeCount, size_t VarCount = 0u>
+class GraphAssemblerStaticLabel {
+ public:
+ Node* PhiAt(size_t index);
+
+ template <typename... Reps>
+ explicit GraphAssemblerStaticLabel(GraphAssemblerLabelType is_deferred,
+ Reps... reps)
+ : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred) {
+ STATIC_ASSERT(VarCount == sizeof...(reps));
+ MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+ reps...};
+ for (size_t i = 0; i < VarCount; i++) {
+ representations_[i] = reps_array[i + 1];
+ }
+ }
+
+ ~GraphAssemblerStaticLabel() { DCHECK(IsBound() || MergedCount() == 0); }
+
+ private:
+ friend class GraphAssembler;
+
+ void SetBound() {
+ DCHECK(!IsBound());
+ DCHECK_EQ(merged_count_, MergeCount);
+ is_bound_ = true;
+ }
+ bool IsBound() const { return is_bound_; }
+
+ size_t PhiCount() const { return VarCount; }
+ size_t MaxMergeCount() const { return MergeCount; }
+ size_t MergedCount() const { return merged_count_; }
+ bool IsDeferred() const { return is_deferred_; }
+
+ // For each phi, the buffer must have at least MaxMergeCount() + 1
+ // node entries.
+ Node** GetBindingsPtrFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return &bindings_[phi_index * (MergeCount + 1)];
+ }
+ void SetBinding(size_t phi_index, size_t merge_index, Node* binding) {
+ DCHECK_LT(phi_index, PhiCount());
+ DCHECK_LT(merge_index, MergeCount);
+ bindings_[phi_index * (MergeCount + 1) + merge_index] = binding;
+ }
+ MachineRepresentation GetRepresentationFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return representations_[phi_index];
+ }
+ // The controls buffer must have at least MaxMergeCount() entries.
+ Node** GetControlsPtr() { return controls_; }
+ // The effects buffer must have at least MaxMergeCount() + 1 entries.
+ Node** GetEffectsPtr() { return effects_; }
+ void IncrementMergedCount() { merged_count_++; }
+
+ bool is_bound_ = false;
+ bool is_deferred_;
+ size_t merged_count_ = 0;
+ Node* effects_[MergeCount + 1]; // Extra element for control edge,
+ // so that we can use the array to
+ // construct EffectPhi.
+ Node* controls_[MergeCount];
+ Node* bindings_[(MergeCount + 1) * VarCount + 1];
+ MachineRepresentation representations_[VarCount + 1];
+};
+
+// General label (with zone allocated buffers for incoming branches and phi
+// inputs).
+class GraphAssemblerLabel {
+ public:
+ Node* PhiAt(size_t index);
+
+ GraphAssemblerLabel(GraphAssemblerLabelType is_deferred, size_t merge_count,
+ size_t var_count, MachineRepresentation* representations,
+ Zone* zone);
+
+ ~GraphAssemblerLabel();
+
+ private:
+ friend class GraphAssembler;
+
+ void SetBound() {
+ DCHECK(!is_bound_);
+ is_bound_ = true;
+ }
+ bool IsBound() const { return is_bound_; }
+ size_t PhiCount() const { return var_count_; }
+ size_t MaxMergeCount() const { return max_merge_count_; }
+ size_t MergedCount() const { return merged_count_; }
+ bool IsDeferred() const { return is_deferred_; }
+
+ // For each phi, the buffer must have at least MaxMergeCount() + 1
+ // node entries.
+ Node** GetBindingsPtrFor(size_t phi_index);
+ void SetBinding(size_t phi_index, size_t merge_index, Node* binding);
+ MachineRepresentation GetRepresentationFor(size_t phi_index);
+ // The controls buffer must have at least MaxMergeCount() entries.
+ Node** GetControlsPtr();
+ // The effects buffer must have at least MaxMergeCount() + 1 entries.
+ Node** GetEffectsPtr();
+ void IncrementMergedCount() { merged_count_++; }
+
+ bool is_bound_ = false;
+ bool is_deferred_;
+ size_t merged_count_ = 0;
+ size_t max_merge_count_;
+ size_t var_count_;
+ Node** effects_ = nullptr;
+ Node** controls_ = nullptr;
+ Node** bindings_ = nullptr;
+ MachineRepresentation* representations_ = nullptr;
+};
+
+class GraphAssembler {
+ public:
+ GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control, Zone* zone);
+
+ void Reset(Node* effect, Node* control);
+
+ // Create non-deferred label with statically known number of incoming
+ // gotos/branches.
+ template <size_t MergeCount, typename... Reps>
+ static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)> MakeLabel(
+ Reps... reps) {
+ return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+ GraphAssemblerLabelType::kNonDeferred, reps...);
+ }
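+
+  // Illustrative use (assumed names, not part of this patch): merge two
+  // paths into a single tagged phi.
+  //
+  //   auto done =
+  //       GraphAssembler::MakeLabel<2>(MachineRepresentation::kTagged);
+  //   gasm.GotoIf(condition, &done, value_if_true);
+  //   gasm.Goto(&done, value_if_false);
+  //   gasm.Bind(&done);
+  //   Node* merged = done.PhiAt(0);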
+
+ // Create deferred label with statically known number of incoming
+ // gotos/branches.
+ template <size_t MergeCount, typename... Reps>
+ static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>
+ MakeDeferredLabel(Reps... reps) {
+ return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+ GraphAssemblerLabelType::kDeferred, reps...);
+ }
+
+ // Create label with number of incoming branches supplied at runtime.
+ template <typename... Reps>
+ GraphAssemblerLabel MakeLabelFor(GraphAssemblerLabelType is_deferred,
+ size_t merge_count, Reps... reps) {
+ MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+ reps...};
+ return GraphAssemblerLabel(is_deferred, merge_count, sizeof...(reps),
+ &(reps_array[1]), temp_zone());
+ }
+
+ // Value creation.
+ Node* IntPtrConstant(intptr_t value);
+ Node* Uint32Constant(int32_t value);
+ Node* Int32Constant(int32_t value);
+ Node* UniqueInt32Constant(int32_t value);
+ Node* SmiConstant(int32_t value);
+ Node* Float64Constant(double value);
+ Node* Projection(int index, Node* value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* CEntryStubConstant(int result_size);
+ Node* ExternalConstant(ExternalReference ref);
+
+#define SINGLETON_CONST_DECL(Name) Node* Name();
+ JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DECL)
+#undef SINGLETON_CONST_DECL
+
+#define PURE_UNOP_DECL(Name) Node* Name(Node* input);
+ PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DECL)
+#undef PURE_UNOP_DECL
+
+#define BINOP_DECL(Name) Node* Name(Node* left, Node* right);
+ PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+ CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+#undef BINOP_DECL
+
+ Node* Float64RoundDown(Node* value);
+
+ Node* ToNumber(Node* value);
+ Node* Allocate(PretenureFlag pretenure, Node* size);
+ Node* LoadField(FieldAccess const&, Node* object);
+ Node* LoadElement(ElementAccess const&, Node* object, Node* index);
+ Node* StoreField(FieldAccess const&, Node* object, Node* value);
+ Node* StoreElement(ElementAccess const&, Node* object, Node* index,
+ Node* value);
+
+ Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
+ Node* Load(MachineType rep, Node* object, Node* offset);
+
+ Node* Retain(Node* buffer);
+ Node* UnsafePointerAdd(Node* base, Node* external);
+
+ Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+ Node* frame_state);
+ template <typename... Args>
+ Node* Call(const CallDescriptor* desc, Args... args);
+ template <typename... Args>
+ Node* Call(const Operator* op, Args... args);
+
+ // Basic control operations.
+ template <class LabelType>
+ void Bind(LabelType* label);
+
+ template <class LabelType, typename... vars>
+ void Goto(LabelType* label, vars...);
+
+ void Branch(Node* condition, GraphAssemblerStaticLabel<1>* if_true,
+ GraphAssemblerStaticLabel<1>* if_false);
+
+ // Control helpers.
+  // {GotoIf(c, l)} is equivalent to {Branch(c, l, templ); Bind(templ)}.
+ template <class LabelType, typename... vars>
+ void GotoIf(Node* condition, LabelType* label, vars...);
+
+  // {GotoUnless(c, l)} is equivalent to {Branch(c, templ, l); Bind(templ)}.
+ template <class LabelType, typename... vars>
+ void GotoUnless(Node* condition, LabelType* label, vars...);
+
+ // Extractors (should be only used when destructing/resetting the assembler).
+ Node* ExtractCurrentControl();
+ Node* ExtractCurrentEffect();
+
+ private:
+ template <class LabelType, typename... Vars>
+ void MergeState(LabelType label, Vars... vars);
+
+ Operator const* ToNumberOperator();
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph_->graph(); }
+ Zone* temp_zone() const { return temp_zone_; }
+ CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+ MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph()->simplified();
+ }
+
+ SetOncePointer<Operator const> to_number_operator_;
+ Zone* temp_zone_;
+ JSGraph* jsgraph_;
+ Node* current_effect_;
+ Node* current_control_;
+};
+
+template <size_t MergeCount, size_t VarCount>
+Node* GraphAssemblerStaticLabel<MergeCount, VarCount>::PhiAt(size_t index) {
+ DCHECK(IsBound());
+ return GetBindingsPtrFor(index)[0];
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::MergeState(LabelType label, Vars... vars) {
+ DCHECK(!label->IsBound());
+ size_t merged_count = label->MergedCount();
+ DCHECK_LT(merged_count, label->MaxMergeCount());
+ DCHECK_EQ(label->PhiCount(), sizeof...(vars));
+ label->GetEffectsPtr()[merged_count] = current_effect_;
+ label->GetControlsPtr()[merged_count] = current_control_;
+ // We need to start with nullptr to avoid 0-length arrays.
+ Node* var_array[] = {nullptr, vars...};
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->SetBinding(i, merged_count, var_array[i + 1]);
+ }
+ label->IncrementMergedCount();
+}
+
+template <class LabelType>
+void GraphAssembler::Bind(LabelType* label) {
+ DCHECK(current_control_ == nullptr);
+ DCHECK(current_effect_ == nullptr);
+ DCHECK(label->MaxMergeCount() > 0);
+ DCHECK_EQ(label->MaxMergeCount(), label->MergedCount());
+
+ int merge_count = static_cast<int>(label->MaxMergeCount());
+ if (merge_count == 1) {
+ current_control_ = label->GetControlsPtr()[0];
+ current_effect_ = label->GetEffectsPtr()[0];
+ label->SetBound();
+ return;
+ }
+
+ current_control_ = graph()->NewNode(common()->Merge(merge_count), merge_count,
+ label->GetControlsPtr());
+
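+  // The effects and bindings buffers each reserve one extra slot; the merge
+  // control is stored there so the buffer can be handed to NewNode directly
+  // as the inputs of an EffectPhi/Phi.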
+ Node** effects = label->GetEffectsPtr();
+ current_effect_ = effects[0];
+ for (size_t i = 1; i < label->MaxMergeCount(); i++) {
+ if (current_effect_ != effects[i]) {
+ effects[label->MaxMergeCount()] = current_control_;
+ current_effect_ = graph()->NewNode(common()->EffectPhi(merge_count),
+ merge_count + 1, effects);
+ break;
+ }
+ }
+
+ for (size_t var = 0; var < label->PhiCount(); var++) {
+ Node** bindings = label->GetBindingsPtrFor(var);
+ bindings[label->MaxMergeCount()] = current_control_;
+ bindings[0] = graph()->NewNode(
+ common()->Phi(label->GetRepresentationFor(var), merge_count),
+ merge_count + 1, bindings);
+ }
+
+ label->SetBound();
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::Goto(LabelType* label, Vars... vars) {
+ DCHECK_NOT_NULL(current_control_);
+ DCHECK_NOT_NULL(current_effect_);
+ MergeState(label, vars...);
+ current_control_ = nullptr;
+ current_effect_ = nullptr;
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoIf(Node* condition, LabelType* label, Vars... vars) {
+ BranchHint hint =
+ label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+ MergeState(label, vars...);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoUnless(Node* condition, LabelType* label,
+ Vars... vars) {
+ BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+ MergeState(label, vars...);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
+ const Operator* op = common()->Call(desc);
+ return Call(op, args...);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const Operator* op, Args... args) {
+ DCHECK_EQ(IrOpcode::kCall, op->opcode());
+ Node* args_array[] = {args..., current_effect_, current_control_};
+ int size = static_cast<int>(sizeof...(args)) + op->EffectInputCount() +
+ op->ControlInputCount();
+ Node* call = graph()->NewNode(op, size, args_array);
+ DCHECK_EQ(0, op->ControlOutputCount());
+ current_effect_ = call;
+ return call;
+}
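+
+// Illustrative call through the assembler (assumed operand names): the
+// current effect is threaded in automatically, and the call node itself
+// becomes the new effect.
+//
+//   Node* result = gasm.Call(call_descriptor, code_target, argument, context);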
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_GRAPH_ASSEMBLER_H_
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index b13b954714..117e569ad8 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -25,15 +25,17 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
-
GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
- stack_(zone) {}
-
+ stack_(zone) {
+ if (dead != nullptr) {
+ NodeProperties::SetType(dead_, Type::None());
+ }
+}
GraphReducer::~GraphReducer() {}
@@ -113,17 +115,23 @@ void GraphReducer::ReduceTop() {
if (node->IsDead()) return Pop(); // Node was killed while on stack.
+ Node::Inputs node_inputs = node->inputs();
+
// Recurse on an input if necessary.
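+  // Note that {entry.input_index} is only advanced when we actually recurse,
+  // so the entry records the exact input to resume from once the recursion
+  // unwinds.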
- int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
- for (int i = start; i < node->InputCount(); i++) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ int start = entry.input_index < node_inputs.count() ? entry.input_index : 0;
+ for (int i = start; i < node_inputs.count(); ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
- for (int i = 0; i < start; i++) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ for (int i = 0; i < start; ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
// Remember the max node id before reduction.
@@ -139,10 +147,13 @@ void GraphReducer::ReduceTop() {
Node* const replacement = reduction.replacement();
if (replacement == node) {
// In-place update of {node}, may need to recurse on an input.
- for (int i = 0; i < node->InputCount(); ++i) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ Node::Inputs node_inputs = node->inputs();
+ for (int i = 0; i < node_inputs.count(); ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
}
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index ab20f8f11f..1043c91e2a 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -497,7 +497,11 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
if (position.IsKnown()) {
- os_ << " pos:" << position.ScriptOffset();
+ os_ << " pos:";
+ if (position.isInlined()) {
+ os_ << "inlining(" << position.InliningId() << "),";
+ }
+ os_ << position.ScriptOffset();
}
}
os_ << " <|@\n";
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 20afdc104d..e004896ea2 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -66,9 +66,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
- (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
}
@@ -185,7 +183,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-
class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
@@ -316,7 +313,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -331,7 +327,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(&done); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -896,10 +891,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ add(i.OutputRegister(0), i.InputRegister(2));
}
- __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
@@ -921,10 +916,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ sub(i.OutputRegister(0), i.InputRegister(2));
}
- __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
@@ -1611,61 +1606,66 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
-// Assembles a branch after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- IA32OperandConverter i(this, instr);
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
- switch (branch->condition) {
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
case kUnorderedEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kEqual:
- __ j(equal, tlabel);
+ return equal;
break;
case kUnorderedNotEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kNotEqual:
- __ j(not_equal, tlabel);
+ return not_equal;
break;
case kSignedLessThan:
- __ j(less, tlabel);
+ return less;
break;
case kSignedGreaterThanOrEqual:
- __ j(greater_equal, tlabel);
+ return greater_equal;
break;
case kSignedLessThanOrEqual:
- __ j(less_equal, tlabel);
+ return less_equal;
break;
case kSignedGreaterThan:
- __ j(greater, tlabel);
+ return greater;
break;
case kUnsignedLessThan:
- __ j(below, tlabel);
+ return below;
break;
case kUnsignedGreaterThanOrEqual:
- __ j(above_equal, tlabel);
+ return above_equal;
break;
case kUnsignedLessThanOrEqual:
- __ j(below_equal, tlabel);
+ return below_equal;
break;
case kUnsignedGreaterThan:
- __ j(above, tlabel);
+ return above;
break;
case kOverflow:
- __ j(overflow, tlabel);
+ return overflow;
break;
case kNotOverflow:
- __ j(no_overflow, tlabel);
+ return no_overflow;
break;
default:
UNREACHABLE();
+ return no_condition;
break;
}
+}
+
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
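+  // For floating-point comparisons the parity flag signals an unordered
+  // result (at least one operand was NaN); dispatch that case to the proper
+  // target before testing the main condition.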
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
}
@@ -1675,6 +1675,71 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ IA32OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0, esi);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(esi, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_even, &end);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(condition), tlabel);
+ __ bind(&end);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1687,58 +1752,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = no_condition;
- switch (condition) {
- case kUnorderedEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kEqual:
- cc = equal;
- break;
- case kUnorderedNotEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kNotEqual:
- cc = not_equal;
- break;
- case kSignedLessThan:
- cc = less;
- break;
- case kSignedGreaterThanOrEqual:
- cc = greater_equal;
- break;
- case kSignedLessThanOrEqual:
- cc = less_equal;
- break;
- case kSignedGreaterThan:
- cc = greater;
- break;
- case kUnsignedLessThan:
- cc = below;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- cc = below_equal;
- break;
- case kUnsignedGreaterThan:
- cc = above;
- break;
- case kOverflow:
- cc = overflow;
- break;
- case kNotOverflow:
- cc = no_overflow;
- break;
- default:
- UNREACHABLE();
- break;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
}
+ Condition cc = FlagsConditionToCondition(condition);
+
__ bind(&check);
if (reg.is_byte_register()) {
// setcc for byte registers (al, bl, cl, dl).
@@ -2082,7 +2106,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, g.ToImmediate(source));
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
- uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+ uint32_t src = src_constant.ToFloat32AsInt();
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
@@ -2093,7 +2117,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint64_t src = src_constant.ToFloat64AsInt();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index c827c68a5f..5548f55a1e 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -351,6 +351,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1203,10 +1208,13 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
+ } else {
+ DCHECK(cont->IsTrap());
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1222,9 +1230,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1240,21 +1251,54 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+ if (hint_node->opcode() == IrOpcode::kLoad) {
+ MachineType hint = LoadRepresentationOf(hint_node->op());
+ if (node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant) {
+ int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(node)
+ : OpParameter<int64_t>(node);
+ if (hint == MachineType::Int8()) {
+ if (constant >= std::numeric_limits<int8_t>::min() &&
+ constant <= std::numeric_limits<int8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint8()) {
+ if (constant >= std::numeric_limits<uint8_t>::min() &&
+ constant <= std::numeric_limits<uint8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int16()) {
+ if (constant >= std::numeric_limits<int16_t>::min() &&
+ constant <= std::numeric_limits<int16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint16()) {
+ if (constant >= std::numeric_limits<uint16_t>::min() &&
+ constant <= std::numeric_limits<uint16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int32()) {
+ return hint;
+ } else if (hint == MachineType::Uint32()) {
+ if (constant >= 0) return hint;
+ }
+ }
+ }
+ return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+ : MachineType::None();
+}
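+
+// Illustrative effect of the above: comparing a Uint8 load against the
+// constant 200 may use a byte-width compare since 200 fits in uint8_t,
+// while comparing it against 300 falls back to the full 32-bit compare.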
+
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // Currently, if one of the two operands is not a Load, we don't know what its
- // machine representation is, so we bail out.
- // TODO(epertoso): we can probably get some size information out of immediates
- // and phi nodes.
- if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
- return opcode;
- }
+ // TODO(epertoso): we can probably get some size information out of phi nodes.
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- MachineType left_type = LoadRepresentationOf(left->op());
- MachineType right_type = LoadRepresentationOf(right->op());
+ MachineType left_type = MachineTypeForNarrow(left, right);
+ MachineType right_type = MachineTypeForNarrow(right, left);
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
@@ -1332,10 +1376,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
- // TODO(epertoso): we should use `narrowed_opcode' here once we match
- // immediates too.
- return VisitCompareWithMemoryOperand(selector, opcode, left,
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+ return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
g.UseImmediate(right), cont);
}
return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1352,11 +1394,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1501,6 +1538,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
IA32OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 6242e9804e..00b2733b3b 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -152,7 +152,8 @@ enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
kFlags_deoptimize = 2,
- kFlags_set = 3
+ kFlags_set = 3,
+ kFlags_trap = 4
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -207,9 +208,9 @@ typedef int32_t InstructionCode;
// the instruction.
typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
typedef BitField<AddressingMode, 8, 5> AddressingModeField;
-typedef BitField<FlagsMode, 13, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
-typedef BitField<int, 20, 12> MiscField;
+typedef BitField<FlagsMode, 13, 3> FlagsModeField;
+typedef BitField<FlagsCondition, 16, 5> FlagsConditionField;
+typedef BitField<int, 21, 11> MiscField;
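+// Note: adding kFlags_trap makes FlagsMode a five-valued enum, so
+// FlagsModeField grows from two bits to three and the following fields
+// shift up by one bit.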
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 6cb87ea0c0..1b1fa12e6e 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
-#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
@@ -345,10 +345,17 @@ class FlagsContinuation final {
return FlagsContinuation(condition, result);
}
+ // Creates a new flags continuation for a wasm trap.
+ static FlagsContinuation ForTrap(FlagsCondition condition,
+ Runtime::FunctionId trap_id, Node* result) {
+ return FlagsContinuation(condition, trap_id, result);
+ }
+
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const { return mode_ == kFlags_branch; }
bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
+ bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
DCHECK(!IsNone());
return condition_;
@@ -365,6 +372,10 @@ class FlagsContinuation final {
DCHECK(IsSet());
return frame_state_or_result_;
}
+ Runtime::FunctionId trap_id() const {
+ DCHECK(IsTrap());
+ return trap_id_;
+ }
BasicBlock* true_block() const {
DCHECK(IsBranch());
return true_block_;
@@ -437,6 +448,15 @@ class FlagsContinuation final {
DCHECK_NOT_NULL(result);
}
+ FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
+ Node* result)
+ : mode_(kFlags_trap),
+ condition_(condition),
+ frame_state_or_result_(result),
+ trap_id_(trap_id) {
+ DCHECK_NOT_NULL(result);
+ }
+
FlagsMode const mode_;
FlagsCondition condition_;
   DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize
@@ -444,6 +464,7 @@ class FlagsContinuation final {
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+ Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 8f899f3c8c..ae96b9106f 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -127,7 +127,6 @@ void InstructionSelector::AddInstruction(Instruction* instr) {
}
}
-
Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand output,
size_t temp_count,
@@ -414,13 +413,10 @@ void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
-
namespace {
-enum class FrameStateInputKind { kAny, kStackSlot };
-
-InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
- FrameStateInputKind kind,
+InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
+ Node* input, FrameStateInputKind kind,
MachineRepresentation rep) {
if (rep == MachineRepresentation::kNone) {
return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
@@ -432,8 +428,30 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
case IrOpcode::kNumberConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
+ case IrOpcode::kHeapConstant: {
+ if (!CanBeTaggedPointer(rep)) {
+ // If we have inconsistent static and dynamic types, e.g. if we
+ // smi-check a string, we can get here with a heap object that
+ // says it is a smi. In that case, we return an invalid instruction
+ // operand, which will be interpreted as an optimized-out value.
+
+ // TODO(jarin) Ideally, we should turn the current instruction
+ // into an abort (we should never execute it).
+ return InstructionOperand();
+ }
+
+ Handle<HeapObject> constant = OpParameter<Handle<HeapObject>>(input);
+ Heap::RootListIndex root_index;
+ if (isolate->heap()->IsRootHandle(constant, &root_index) &&
+ root_index == Heap::kOptimizedOutRootIndex) {
+ // For an optimized-out object we return an invalid instruction
+ // operand, so that we take the fast path for optimized-out values.
+ return InstructionOperand();
+ }
+
+ return g->UseImmediate(input);
+ }
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
UNREACHABLE();
@@ -452,6 +470,7 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
return InstructionOperand();
}
+} // namespace
class StateObjectDeduplicator {
public:
@@ -477,14 +496,16 @@ class StateObjectDeduplicator {
ZoneVector<Node*> objects_;
};
-
// Returns the number of instruction operands added to inputs.
-size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
- InstructionOperandVector* inputs,
- OperandGenerator* g,
- StateObjectDeduplicator* deduplicator,
- Node* input, MachineType type,
- FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddOperandToStateValueDescriptor(
+ StateValueList* values, InstructionOperandVector* inputs,
+ OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
+ MachineType type, FrameStateInputKind kind, Zone* zone) {
+ if (input == nullptr) {
+ values->PushOptimizedOut();
+ return 0;
+ }
+
switch (input->opcode()) {
case IrOpcode::kObjectState: {
UNREACHABLE();
@@ -495,41 +516,45 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
if (id == StateObjectDeduplicator::kNotDuplicated) {
size_t entries = 0;
id = deduplicator->InsertObject(input);
- descriptor->fields().push_back(
- StateValueDescriptor::Recursive(zone, id));
- StateValueDescriptor* new_desc = &descriptor->fields().back();
+ StateValueList* nested = values->PushRecursiveField(zone, id);
int const input_count = input->op()->ValueInputCount();
ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
for (int i = 0; i < input_count; ++i) {
entries += AddOperandToStateValueDescriptor(
- new_desc, inputs, g, deduplicator, input->InputAt(i),
- types->at(i), kind, zone);
+ nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
+ kind, zone);
}
return entries;
} else {
// Crankshaft counts duplicate objects for the running id, so we have
// to push the input again.
deduplicator->InsertObject(input);
- descriptor->fields().push_back(
- StateValueDescriptor::Duplicate(zone, id));
+ values->PushDuplicate(id);
return 0;
}
}
default: {
- inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
- descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
- return 1;
+ InstructionOperand op =
+ OperandForDeopt(isolate(), g, input, kind, type.representation());
+ if (op.kind() == InstructionOperand::INVALID) {
+ // Invalid operand means the value is impossible or optimized-out.
+ values->PushOptimizedOut();
+ return 0;
+ } else {
+ inputs->push_back(op);
+ values->PushPlain(type);
+ return 1;
+ }
}
}
}
// Returns the number of instruction operands added to inputs.
-size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
- Node* state, OperandGenerator* g,
- StateObjectDeduplicator* deduplicator,
- InstructionOperandVector* inputs,
- FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddInputsToFrameStateDescriptor(
+ FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
size_t entries = 0;
@@ -553,8 +578,12 @@ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
- StateValueDescriptor* values_descriptor =
- descriptor->GetStateValueDescriptor();
+ StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
+
+ DCHECK_EQ(values_descriptor->size(), 0u);
+ values_descriptor->ReserveSize(
+ descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, function,
MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
@@ -583,8 +612,6 @@ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
return entries;
}
-} // namespace
-
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -796,17 +823,30 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
}
+bool InstructionSelector::IsSourcePositionUsed(Node* node) {
+ return (source_position_mode_ == kAllSourcePositions ||
+ node->opcode() == IrOpcode::kCall ||
+ node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+}
+
void InstructionSelector::VisitBlock(BasicBlock* block) {
DCHECK(!current_block_);
current_block_ = block;
- int current_block_end = static_cast<int>(instructions_.size());
+ auto current_num_instructions = [&] {
+ DCHECK_GE(kMaxInt, instructions_.size());
+ return static_cast<int>(instructions_.size());
+ };
+ int current_block_end = current_num_instructions();
int effect_level = 0;
for (Node* const node : *block) {
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
- node->opcode() == IrOpcode::kCall) {
+ node->opcode() == IrOpcode::kCall ||
+ node->opcode() == IrOpcode::kProtectedLoad ||
+ node->opcode() == IrOpcode::kProtectedStore) {
++effect_level;
}
SetEffectLevel(node, effect_level);
@@ -818,10 +858,25 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(block->control_input(), effect_level);
}
+ auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
+ if (instruction_selection_failed()) return false;
+ if (current_num_instructions() == instruction_start) return true;
+ std::reverse(instructions_.begin() + instruction_start,
+ instructions_.end());
+ if (!node) return true;
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
+ sequence()->SetSourcePosition(instructions_[instruction_start],
+ source_position);
+ }
+ return true;
+ };
+
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
- std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+ if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+ return;
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
@@ -830,19 +885,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the code "bottom
// up".
- size_t current_node_end = instructions_.size();
+ int current_node_end = current_num_instructions();
VisitNode(node);
- if (instruction_selection_failed()) return;
- std::reverse(instructions_.begin() + current_node_end, instructions_.end());
- if (instructions_.size() == current_node_end) continue;
- // Mark source position on first instruction emitted.
- SourcePosition source_position = source_positions_->GetSourcePosition(node);
- if (source_position.IsKnown() &&
- (source_position_mode_ == kAllSourcePositions ||
- node->opcode() == IrOpcode::kCall)) {
- sequence()->SetSourcePosition(instructions_[current_node_end],
- source_position);
- }
+ if (!FinishEmittedInstructions(node, current_node_end)) return;
}
// We're done with the block.
@@ -1013,6 +1058,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
+ case IrOpcode::kTrapIf:
+ return VisitTrapIf(node, static_cast<Runtime::FunctionId>(
+ OpParameter<int32_t>(node->op())));
+ case IrOpcode::kTrapUnless:
+ return VisitTrapUnless(node, static_cast<Runtime::FunctionId>(
+ OpParameter<int32_t>(node->op())));
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
@@ -1033,6 +1084,8 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kStore:
return VisitStore(node);
+ case IrOpcode::kProtectedStore:
+ return VisitProtectedStore(node);
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
@@ -1387,15 +1440,56 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kAtomicStore:
return VisitAtomicStore(node);
- case IrOpcode::kProtectedLoad:
+ case IrOpcode::kProtectedLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
+ }
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kCreateFloat32x4:
+ return MarkAsSimd128(node), VisitCreateFloat32x4(node);
+ case IrOpcode::kFloat32x4ExtractLane:
+ return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
+ case IrOpcode::kFloat32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
+ case IrOpcode::kFloat32x4FromInt32x4:
+ return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
+ case IrOpcode::kFloat32x4FromUint32x4:
+ return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
+ case IrOpcode::kFloat32x4Abs:
+ return MarkAsSimd128(node), VisitFloat32x4Abs(node);
+ case IrOpcode::kFloat32x4Neg:
+ return MarkAsSimd128(node), VisitFloat32x4Neg(node);
+ case IrOpcode::kFloat32x4Add:
+ return MarkAsSimd128(node), VisitFloat32x4Add(node);
+ case IrOpcode::kFloat32x4Sub:
+ return MarkAsSimd128(node), VisitFloat32x4Sub(node);
+ case IrOpcode::kFloat32x4Equal:
+ return MarkAsSimd128(node), VisitFloat32x4Equal(node);
+ case IrOpcode::kFloat32x4NotEqual:
+ return MarkAsSimd128(node), VisitFloat32x4NotEqual(node);
case IrOpcode::kCreateInt32x4:
return MarkAsSimd128(node), VisitCreateInt32x4(node);
case IrOpcode::kInt32x4ExtractLane:
return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
+ case IrOpcode::kInt32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
+ case IrOpcode::kInt32x4FromFloat32x4:
+ return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
+ case IrOpcode::kUint32x4FromFloat32x4:
+ return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
+ case IrOpcode::kInt32x4Add:
+ return MarkAsSimd128(node), VisitInt32x4Add(node);
+ case IrOpcode::kInt32x4Sub:
+ return MarkAsSimd128(node), VisitInt32x4Sub(node);
+ case IrOpcode::kInt32x4Equal:
+ return MarkAsSimd128(node), VisitInt32x4Equal(node);
+ case IrOpcode::kInt32x4NotEqual:
+ return MarkAsSimd128(node), VisitInt32x4NotEqual(node);
+ case IrOpcode::kSimd32x4Select:
+ return MarkAsSimd128(node), VisitSimd32x4Select(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1538,7 +1632,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
void InstructionSelector::VisitStackSlot(Node* node) {
- int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+ int size = StackSlotSizeOf(node->op());
int slot = frame_->AllocateSpillSlot(size);
OperandGenerator g(this);
@@ -1547,8 +1641,7 @@ void InstructionSelector::VisitStackSlot(Node* node) {
}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
- OperandGenerator g(this);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ EmitIdentity(node);
}
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
@@ -1723,13 +1816,70 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -1970,7 +2120,8 @@ void InstructionSelector::VisitReturn(Node* ret) {
DCHECK_GE(input_count, 1);
auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
Node* pop_count = ret->InputAt(0);
- value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+ value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
+ pop_count->opcode() == IrOpcode::kInt64Constant)
? g.UseImmediate(pop_count)
: g.UseRegister(pop_count);
for (int i = 1; i < input_count; ++i) {
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 65ba8f7c71..b7753ce7b7 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -26,6 +26,7 @@ class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;
+class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
@@ -42,6 +43,8 @@ class PushParameter {
MachineType type_;
};
+enum class FrameStateInputKind { kAny, kStackSlot };
+
// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
public:
@@ -286,6 +289,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
int GetTempsCountForTailCallFromJSFunction();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone);
+ size_t AddOperandToStateValueDescriptor(StateValueList* values,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
@@ -307,8 +321,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
- MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
- MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
void VisitFinishRegion(Node* node);
@@ -321,6 +334,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitCall(Node* call, BasicBlock* handler = nullptr);
void VisitDeoptimizeIf(Node* node);
void VisitDeoptimizeUnless(Node* node);
+ void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
+ void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
@@ -351,6 +366,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool instruction_selection_failed() { return instruction_selection_failed_; }
void MarkPairProjectionsAsWord32(Node* node);
+ bool IsSourcePositionUsed(Node* node);
// ===========================================================================
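FrameStateInputKind controls how a deoptimization input is materialized:
kStackSlot pins the value to a spill slot so it survives the call, while kAny
leaves the choice to the register allocator. A hedged sketch of a selector
branching on it (OperandGenerator method names assumed):

  InstructionOperand OperandForFrameState(OperandGenerator* g, Node* input,
                                          FrameStateInputKind kind) {
    switch (kind) {
      case FrameStateInputKind::kStackSlot:
        return g->UseUniqueSlot(input);  // force a stack slot
      case FrameStateInputKind::kAny:
        return g->UseAny(input);         // allocator's choice
    }
    UNREACHABLE();
    return InstructionOperand();
  }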
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 3b2311a23f..c4560b6e76 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -433,6 +433,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os << "deoptimize";
case kFlags_set:
return os << "set";
+ case kFlags_trap:
+ return os << "trap";
}
UNREACHABLE();
return os;
@@ -985,8 +987,18 @@ void InstructionSequence::PrintBlock(int block_id) const {
}
const RegisterConfiguration*
-InstructionSequence::GetRegisterConfigurationForTesting() {
- return GetRegConfig();
+ InstructionSequence::registerConfigurationForTesting_ = nullptr;
+
+const RegisterConfiguration*
+InstructionSequence::RegisterConfigurationForTesting() {
+ DCHECK(registerConfigurationForTesting_ != nullptr);
+ return registerConfigurationForTesting_;
+}
+
+void InstructionSequence::SetRegisterConfigurationForTesting(
+ const RegisterConfiguration* regConfig) {
+ registerConfigurationForTesting_ = regConfig;
+ GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
}
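The per-instance accessor becomes a static override: a test installs a
configuration once and every subsequent GetRegConfig() call observes it.
Usage sketch (the configuration object itself is an assumption):

  const RegisterConfiguration* config = ...;  // test-provided configuration
  InstructionSequence::SetRegisterConfigurationForTesting(config);
  // ... run instruction selection / register allocation under config ...
  InstructionSequence::ClearRegisterConfigurationForTesting();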
FrameStateDescriptor::FrameStateDescriptor(
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 327c8c1192..d62ffc43bd 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1065,16 +1065,33 @@ class V8_EXPORT_PRIVATE Constant final {
}
float ToFloat32() const {
+ // TODO(ahaas): We should remove this function. If value_ has the bit
+ // representation of a signalling NaN, then returning it as float can cause
+ // the signalling bit to flip, and value_ is returned as a quiet NaN.
DCHECK_EQ(kFloat32, type());
return bit_cast<float>(static_cast<int32_t>(value_));
}
+ uint32_t ToFloat32AsInt() const {
+ DCHECK_EQ(kFloat32, type());
+ return bit_cast<uint32_t>(static_cast<int32_t>(value_));
+ }
+
double ToFloat64() const {
+ // TODO(ahaas): We should remove this function. If value_ has the bit
+ // representation of a signalling NaN, then returning it as a double can
+ // cause the signalling bit to flip, and value_ is returned as a quiet NaN.
if (type() == kInt32) return ToInt32();
DCHECK_EQ(kFloat64, type());
return bit_cast<double>(value_);
}
+ uint64_t ToFloat64AsInt() const {
+ if (type() == kInt32) return ToInt32();
+ DCHECK_EQ(kFloat64, type());
+ return bit_cast<uint64_t>(value_);
+ }
+
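The new *AsInt accessors exist because moving a signalling NaN through a
floating-point register can quiet it; the integer view round-trips the bit
pattern exactly. Illustrative sketch (a kFloat64 Constant assumed):

  uint64_t bits = constant.ToFloat64AsInt();  // exact bits, sNaN-safe
  double value = constant.ToFloat64();        // may return a quieted NaN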
ExternalReference ToExternalReference() const {
DCHECK_EQ(kExternalReference, type());
return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
@@ -1104,52 +1121,125 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant);
// Forward declarations.
class FrameStateDescriptor;
-
-enum class StateValueKind { kPlain, kNested, kDuplicate };
-
+enum class StateValueKind : uint8_t {
+ kPlain,
+ kOptimizedOut,
+ kNested,
+ kDuplicate
+};
class StateValueDescriptor {
public:
- explicit StateValueDescriptor(Zone* zone)
+ StateValueDescriptor()
: kind_(StateValueKind::kPlain),
type_(MachineType::AnyTagged()),
- id_(0),
- fields_(zone) {}
+ id_(0) {}
- static StateValueDescriptor Plain(Zone* zone, MachineType type) {
- return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+ static StateValueDescriptor Plain(MachineType type) {
+ return StateValueDescriptor(StateValueKind::kPlain, type, 0);
+ }
+ static StateValueDescriptor OptimizedOut() {
+ return StateValueDescriptor(StateValueKind::kOptimizedOut,
+ MachineType::AnyTagged(), 0);
}
- static StateValueDescriptor Recursive(Zone* zone, size_t id) {
- return StateValueDescriptor(StateValueKind::kNested, zone,
+ static StateValueDescriptor Recursive(size_t id) {
+ return StateValueDescriptor(StateValueKind::kNested,
MachineType::AnyTagged(), id);
}
- static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
- return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+ static StateValueDescriptor Duplicate(size_t id) {
+ return StateValueDescriptor(StateValueKind::kDuplicate,
MachineType::AnyTagged(), id);
}
- size_t size() { return fields_.size(); }
- ZoneVector<StateValueDescriptor>& fields() { return fields_; }
int IsPlain() { return kind_ == StateValueKind::kPlain; }
+ int IsOptimizedOut() { return kind_ == StateValueKind::kOptimizedOut; }
int IsNested() { return kind_ == StateValueKind::kNested; }
int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
MachineType type() const { return type_; }
- MachineType GetOperandType(size_t index) const {
- return fields_[index].type_;
- }
size_t id() const { return id_; }
private:
- StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
- size_t id)
- : kind_(kind), type_(type), id_(id), fields_(zone) {}
+ StateValueDescriptor(StateValueKind kind, MachineType type, size_t id)
+ : kind_(kind), type_(type), id_(id) {}
StateValueKind kind_;
MachineType type_;
size_t id_;
- ZoneVector<StateValueDescriptor> fields_;
};
+class StateValueList {
+ public:
+ explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}
+
+ size_t size() { return fields_.size(); }
+
+ struct Value {
+ StateValueDescriptor* desc;
+ StateValueList* nested;
+
+ Value(StateValueDescriptor* desc, StateValueList* nested)
+ : desc(desc), nested(nested) {}
+ };
+
+ class iterator {
+ public:
+ // Bare minimum of operators needed for range iteration.
+ bool operator!=(const iterator& other) const {
+ return field_iterator != other.field_iterator;
+ }
+ bool operator==(const iterator& other) const {
+ return field_iterator == other.field_iterator;
+ }
+ iterator& operator++() {
+ if (field_iterator->IsNested()) {
+ nested_iterator++;
+ }
+ ++field_iterator;
+ return *this;
+ }
+ Value operator*() {
+ StateValueDescriptor* desc = &(*field_iterator);
+ StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
+ return Value(desc, nested);
+ }
+
+ private:
+ friend class StateValueList;
+
+ iterator(ZoneVector<StateValueDescriptor>::iterator it,
+ ZoneVector<StateValueList*>::iterator nested)
+ : field_iterator(it), nested_iterator(nested) {}
+
+ ZoneVector<StateValueDescriptor>::iterator field_iterator;
+ ZoneVector<StateValueList*>::iterator nested_iterator;
+ };
+
+ void ReserveSize(size_t size) { fields_.reserve(size); }
+
+ StateValueList* PushRecursiveField(Zone* zone, size_t id) {
+ fields_.push_back(StateValueDescriptor::Recursive(id));
+ StateValueList* nested =
+ new (zone->New(sizeof(StateValueList))) StateValueList(zone);
+ nested_.push_back(nested);
+ return nested;
+ }
+ void PushDuplicate(size_t id) {
+ fields_.push_back(StateValueDescriptor::Duplicate(id));
+ }
+ void PushPlain(MachineType type) {
+ fields_.push_back(StateValueDescriptor::Plain(type));
+ }
+ void PushOptimizedOut() {
+ fields_.push_back(StateValueDescriptor::OptimizedOut());
+ }
+
+ iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
+ iterator end() { return iterator(fields_.end(), nested_.end()); }
+
+ private:
+ ZoneVector<StateValueDescriptor> fields_;
+ ZoneVector<StateValueList*> nested_;
+};
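fields_ and nested_ advance in lock-step: the iterator bumps nested_iterator
only when it steps over a kNested field, so each nested list lines up with its
Recursive descriptor. Sketch of a recursive walk (function name hypothetical):

  void WalkStateValues(StateValueList* values) {
    for (StateValueList::Value entry : *values) {
      if (entry.desc->IsNested()) {
        WalkStateValues(entry.nested);  // fields of a captured object
      }
    }
  }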
class FrameStateDescriptor : public ZoneObject {
public:
@@ -1178,10 +1268,7 @@ class FrameStateDescriptor : public ZoneObject {
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
- MachineType GetType(size_t index) const {
- return values_.GetOperandType(index);
- }
- StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
+ StateValueList* GetStateValueDescriptors() { return &values_; }
static const int kImpossibleValue = 0xdead;
@@ -1192,7 +1279,7 @@ class FrameStateDescriptor : public ZoneObject {
size_t parameters_count_;
size_t locals_count_;
size_t stack_count_;
- StateValueDescriptor values_;
+ StateValueList values_;
MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
};
@@ -1500,7 +1587,9 @@ class V8_EXPORT_PRIVATE InstructionSequence final
void ValidateDeferredBlockEntryPaths() const;
void ValidateSSA() const;
- const RegisterConfiguration* GetRegisterConfigurationForTesting();
+ static void SetRegisterConfigurationForTesting(
+ const RegisterConfiguration* regConfig);
+ static void ClearRegisterConfigurationForTesting();
private:
friend V8_EXPORT_PRIVATE std::ostream& operator<<(
@@ -1508,6 +1597,9 @@ class V8_EXPORT_PRIVATE InstructionSequence final
typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
+ static const RegisterConfiguration* RegisterConfigurationForTesting();
+ static const RegisterConfiguration* registerConfigurationForTesting_;
+
Isolate* isolate_;
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 62523ca45c..ff61aa765d 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -61,7 +61,8 @@ void Int64Lowering::LowerGraph() {
// that they are processed after all other nodes.
PreparePhiReplacement(input);
stack_.push_front({input, 0});
- } else if (input->opcode() == IrOpcode::kEffectPhi) {
+ } else if (input->opcode() == IrOpcode::kEffectPhi ||
+ input->opcode() == IrOpcode::kLoop) {
stack_.push_front({input, 0});
} else {
stack_.push_back({input, 0});
@@ -104,6 +105,9 @@ static int GetReturnCountAfterLowering(
void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
Node*& index_high) {
+ if (HasReplacementLow(index)) {
+ index = GetReplacementLow(index);
+ }
#if defined(V8_TARGET_LITTLE_ENDIAN)
index_low = index;
index_high = graph()->NewNode(machine()->Int32Add(), index,
@@ -233,9 +237,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
- if (HasReplacementLow(node->InputAt(2))) {
- node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
- }
+ DefaultLowering(node);
}
break;
}
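Int64Lowering replaces every 64-bit value with a (low, high) pair of 32-bit
nodes; the GetIndexNodes change above makes sure an index that was itself
lowered is swapped for its low word before the +4 high-word address is formed.
Conceptual sketch, little-endian 32-bit target assumed:

  // Load[int64](base, index)  becomes
  //   low  = Load[int32](base, index)
  //   high = Load[int32](base, index + 4)
  // where index must already be the low-word replacement if the index node
  // was itself a lowered 64-bit value.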
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 2962e24502..ec1b01a2a1 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-builtin-reducer.h"
+#include "src/base/bits.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
@@ -128,11 +129,10 @@ MaybeHandle<Map> GetMapWitness(Node* node) {
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
- if (dominator->op()->ValueInputCount() == 2) {
- HeapObjectMatcher m(dominator->InputAt(1));
- if (m.HasValue()) return Handle<Map>::cast(m.Value());
- }
- return MaybeHandle<Map>();
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
+ return (maps.size() == 1) ? MaybeHandle<Map>(maps[0])
+ : MaybeHandle<Map>();
}
if (dominator->op()->EffectInputCount() != 1) {
// Didn't find any appropriate CheckMaps node.
@@ -235,17 +235,27 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Node* control = NodeProperties::GetControlInput(node);
if (iter_kind == ArrayIteratorKind::kTypedArray) {
- // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
- // is potentially a deopt loop, but should be extremely unlikely.
- DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
-
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check = graph()->NewNode(simplified()->BooleanNot(), check);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // For JSTypedArray iterator methods, deopt if the buffer is neutered.
+ // This is potentially a deopt loop, but should be extremely unlikely.
+ DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ // Deoptimize if the {buffer} has been neutered.
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check = graph()->NewNode(simplified()->BooleanNot(), check);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
}
int map_index = -1;
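Both branches implement the protector-cell pattern: while the isolate-wide
invariant holds, the compiler records a code dependency instead of emitting a
per-call check, and neutering any ArrayBuffer later invalidates the code. The
pattern reduced to its core (names as in the hunk above):

  if (isolate()->IsArrayBufferNeuteringIntact()) {
    // Fast path: no runtime check; code deopts if any buffer is neutered.
    dependencies()->AssumePropertyCell(
        factory()->array_buffer_neutering_protector());
  } else {
    // Slow path: explicit ArrayBufferWasNeutered() load + CheckIf deopt.
  }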
@@ -310,6 +320,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Node* value = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+ NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, jsgraph()->Constant(map), effect, control);
effect = graph()->NewNode(
@@ -403,12 +414,17 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
} else {
// For value/entry iteration, first step is a mapcheck to ensure
// inlining is still valid.
+ Node* array_map = etrue1 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ array, etrue1, if_true1);
Node* orig_map = etrue1 =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayIteratorObjectMap()),
iterator, etrue1, if_true1);
- etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
- etrue1, if_true1);
+ Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
+ array_map, orig_map);
+ etrue1 = graph()->NewNode(simplified()->CheckIf(), check_map, etrue1,
+ if_true1);
}
if (kind != IterationKind::kKeys) {
@@ -536,11 +552,20 @@ Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
array, efalse0, if_false0);
- Node* check1 = efalse0 = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
- check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
- efalse0 =
- graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Deoptimize if the array buffer was neutered.
+ Node* check1 = efalse0 = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+ check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+ efalse0 =
+ graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+ }
Node* length = efalse0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
@@ -891,14 +916,11 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
// Check if all maps have the given {instance_type}.
- for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
- Node* const map = NodeProperties::GetValueInput(dominator, i);
- Type* const map_type = NodeProperties::GetType(map);
- if (!map_type->IsHeapConstant()) return false;
- Handle<Map> const map_value =
- Handle<Map>::cast(map_type->AsHeapConstant()->Value());
- if (map_value->instance_type() != instance_type) return false;
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (maps[i]->instance_type() != instance_type) return false;
}
return true;
}
@@ -930,6 +952,14 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
} // namespace
+// ES6 section 20.3.3.1 Date.now ( )
+Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
+ NodeProperties::RemoveValueInputs(node);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(Runtime::kDateCurrentTime));
+ return Changed(node);
+}
+
// ES6 section 20.3.4.10 Date.prototype.getTime ( )
Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -945,34 +975,6 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
return NoChange();
}
-// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
-Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* object = (node->op()->ValueInputCount() >= 3)
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
- // stack trace doesn't contain the @@hasInstance call; we have the
- // corresponding bug in the baseline case. Some massaging of the frame
- // state would be necessary here.
-
- // Morph this {node} into a JSOrdinaryHasInstance node.
- node->ReplaceInput(0, receiver);
- node->ReplaceInput(1, object);
- node->ReplaceInput(2, context);
- node->ReplaceInput(3, frame_state);
- node->ReplaceInput(4, effect);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- return Changed(node);
-}
-
// ES6 section 18.2.2 isFinite ( number )
Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1485,6 +1487,117 @@ Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
return NoChange();
}
+// ES6 section #sec-object.create Object.create(proto, properties)
+Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
+ // We need exactly the target, receiver and prototype parameters.
+ int arg_count = node->op()->ValueInputCount();
+ if (arg_count != 3) return NoChange();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* prototype = NodeProperties::GetValueInput(node, 2);
+ Type* prototype_type = NodeProperties::GetType(prototype);
+ Handle<Map> instance_map;
+ if (!prototype_type->IsHeapConstant()) return NoChange();
+ Handle<HeapObject> prototype_const =
+ prototype_type->AsHeapConstant()->Value();
+ if (!prototype_const->IsNull(isolate()) && !prototype_const->IsJSReceiver()) {
+ return NoChange();
+ }
+ instance_map = Map::GetObjectCreateMap(prototype_const);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ if (instance_map->is_dictionary_map()) {
+ // Allocate an empty NameDictionary as the backing store for the properties.
+ Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
+ int capacity =
+ NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
+ DCHECK(base::bits::IsPowerOfTwo32(capacity));
+ int length = NameDictionary::EntryToIndex(capacity);
+ int size = NameDictionary::SizeFor(length);
+
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(size), effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, jsgraph()->HeapConstant(map), effect, control);
+
+ // Initialize FixedArray fields.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArrayLength()), value,
+ jsgraph()->SmiConstant(length), effect, control);
+ // Initialize HashTable fields.
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForHashTableBaseNumberOfElements()),
+ value, jsgraph()->SmiConstant(0), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForHashTableBaseNumberOfDeletedElement()),
+ value, jsgraph()->SmiConstant(0), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHashTableBaseCapacity()),
+ value, jsgraph()->SmiConstant(capacity), effect, control);
+ // Initialize Dictionary fields.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
+ value, undefined, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForDictionaryNextEnumerationIndex()),
+ value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
+ control);
+ // Initialize the Properties fields.
+ for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
+ index < length; index++) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier)),
+ value, undefined, effect, control);
+ }
+ properties = effect =
+ graph()->NewNode(common()->FinishRegion(), value, effect);
+ }
+
+ int const instance_size = instance_map->instance_size();
+ if (instance_size > kMaxRegularHeapObjectSize) return NoChange();
+ dependencies()->AssumeInitialMapCantChange(instance_map);
+
+ // Emit code to allocate the JSObject instance for the given
+ // {instance_map}.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(instance_size), effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->HeapConstant(instance_map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ properties, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ // Initialize Object fields.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ for (int offset = JSObject::kHeaderSize; offset < instance_size;
+ offset += kPointerSize) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier)),
+ value, undefined, effect, control);
+ }
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ // Replace the original node with the freshly allocated object.
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
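The reduction inlines both Object.create(proto) and Object.create(null); the
dictionary branch handles the null-prototype case, whose map is a dictionary
map backed by a NameDictionary. Net effect at the graph level, in sketch form:

  // JSCallFunction(Object.create, receiver, proto)  becomes
  //   BeginRegion -> Allocate(instance_size)
  //     -> StoreField(map, properties, elements)
  //     -> StoreField(undefined) for every in-object field
  //   -> FinishRegion
  // guarded by an AssumeInitialMapCantChange dependency on instance_map.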
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
JSCallReduction r(node);
@@ -1531,8 +1644,17 @@ Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (index_type->Is(Type::Unsigned32())) {
+ if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
if (Node* receiver = GetStringWitness(node)) {
+ if (!index_type->Is(Type::Unsigned32())) {
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
// Determine the {receiver} length.
Node* receiver_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1544,16 +1666,10 @@ Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check, control);
+ // Return the character from the {receiver} as single character string.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue;
- {
- // Load the character from the {receiver}.
- vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- index, if_true);
-
- // Return it as single character string.
- vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
- }
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ index, if_true);
// Return the empty string otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
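The NumberToUint32 conversion lets one unsigned bounds check cover every index
in Integral32OrMinusZeroOrNaN. Examples of the mapping (sketch):

  //  -0  -> 0           (in bounds: reads the first character)
  //  NaN -> 0
  //  -1  -> 0xFFFFFFFF  (exceeds String::kMaxLength, so the bounds check
  //                      fails and the empty string is returned)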
@@ -1582,8 +1698,17 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (index_type->Is(Type::Unsigned32())) {
+ if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
if (Node* receiver = GetStringWitness(node)) {
+ if (!index_type->Is(Type::Unsigned32())) {
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
// Determine the {receiver} length.
Node* receiver_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1632,6 +1757,7 @@ Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
Node* value = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(JSStringIterator::kSize), effect, control);
+ NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, map, effect, control);
effect = graph()->NewNode(
@@ -1805,21 +1931,29 @@ Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* control = NodeProperties::GetControlInput(node);
if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
// Load the {receiver}s field.
- Node* receiver_value = effect = graph()->NewNode(
- simplified()->LoadField(access), receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect =
- graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
- receiver_buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
- Node* value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), receiver_value);
+ Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+ receiver, effect, control);
+
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* check = effect =
+ graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+ receiver_buffer, effect, control);
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), value);
+ }
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1846,11 +1980,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayPop(node);
case kArrayPush:
return ReduceArrayPush(node);
+ case kDateNow:
+ return ReduceDateNow(node);
case kDateGetTime:
return ReduceDateGetTime(node);
- case kFunctionHasInstance:
- return ReduceFunctionHasInstance(node);
- break;
case kGlobalIsFinite:
reduction = ReduceGlobalIsFinite(node);
break;
@@ -1971,6 +2104,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kNumberParseInt:
reduction = ReduceNumberParseInt(node);
break;
+ case kObjectCreate:
+ reduction = ReduceObjectCreate(node);
+ break;
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 4af3084ea3..295da8d1bc 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -57,8 +57,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind);
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
+ Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
- Reduction ReduceFunctionHasInstance(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMathAbs(Node* node);
@@ -99,6 +99,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsNaN(Node* node);
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
+ Reduction ReduceObjectCreate(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index e48fce91c4..1caf65da01 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,7 +4,10 @@
#include "src/compiler/js-call-reducer.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
@@ -189,6 +192,35 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
+Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* object = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+ // stack trace doesn't contain the @@hasInstance call; we have the
+ // corresponding bug in the baseline case. Some massaging of the frame
+ // state would be necessary here.
+
+ // Morph this {node} into a JSOrdinaryHasInstance node.
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, object);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+ return Changed(node);
+}
+
namespace {
// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
@@ -228,8 +260,59 @@ MaybeHandle<Map> InferReceiverMap(Node* node) {
}
}
+bool CanInlineApiCall(Isolate* isolate, Node* node,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK(node->opcode() == IrOpcode::kJSCallFunction);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+ if (function_template_info->call_code()->IsUndefined(isolate)) {
+ return false;
+ }
+ CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+ // CallApiCallbackStub expects the target in a register and treats the
+ // receiver as an implicit argument, so neither is included in the argument
+ // count.
+ int const argc = static_cast<int>(params.arity()) - 2;
+ if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
+ return false;
+ }
+ HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
+ if (!receiver.HasValue()) {
+ return false;
+ }
+ return receiver.Value()->IsUndefined(isolate) ||
+ (receiver.Value()->map()->IsJSObjectMap() &&
+ !receiver.Value()->map()->is_access_check_needed());
+}
+
} // namespace
+JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
+ Handle<JSObject> object,
+ Handle<FunctionTemplateInfo> function_template_info,
+ Handle<JSObject>* holder) {
+ DCHECK(object->map()->IsJSObjectMap());
+ Handle<Map> object_map(object->map());
+ Handle<FunctionTemplateInfo> expected_receiver_type;
+ if (!function_template_info->signature()->IsUndefined(isolate())) {
+ expected_receiver_type =
+ handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+ }
+ if (expected_receiver_type.is_null() ||
+ expected_receiver_type->IsTemplateFor(*object_map)) {
+ *holder = Handle<JSObject>::null();
+ return kHolderIsReceiver;
+ }
+ while (object_map->has_hidden_prototype()) {
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ object_map = handle(prototype->map());
+ if (expected_receiver_type->IsTemplateFor(*object_map)) {
+ *holder = prototype;
+ return kHolderFound;
+ }
+ }
+ return kHolderNotFound;
+}
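LookupHolder walks hidden prototypes only, so an API call is never inlined
through an ordinary prototype chain. Result semantics in sketch form:

  // kHolderIsReceiver -> receiver matches the signature; *holder stays null
  // kHolderFound      -> first hidden prototype matching the signature
  // kHolderNotFound   -> no match; the API call cannot be inlined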
+
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
@@ -251,6 +334,69 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceCallApiFunction(
+ Node* node, Node* target,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ Isolate* isolate = this->isolate();
+ CHECK(!isolate->serializer_enabled());
+ HeapObjectMatcher m(target);
+ DCHECK(m.HasValue() && m.Value()->IsJSFunction());
+ if (!CanInlineApiCall(isolate, node, function_template_info)) {
+ return NoChange();
+ }
+ Handle<CallHandlerInfo> call_handler_info(
+ handle(CallHandlerInfo::cast(function_template_info->call_code())));
+ Handle<Object> data(call_handler_info->data(), isolate);
+
+ Node* receiver_node = NodeProperties::GetValueInput(node, 1);
+ CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+
+ Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
+ bool const receiver_is_undefined = receiver->IsUndefined(isolate);
+ if (receiver_is_undefined) {
+ receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
+ } else {
+ DCHECK(receiver->map()->IsJSObjectMap() &&
+ !receiver->map()->is_access_check_needed());
+ }
+
+ Handle<JSObject> holder;
+ HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
+ function_template_info, &holder);
+ if (lookup == kHolderNotFound) return NoChange();
+ if (receiver_is_undefined) {
+ receiver_node = jsgraph()->HeapConstant(receiver);
+ NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+ }
+ Node* holder_node =
+ lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
+
+ Zone* zone = graph()->zone();
+ // Same as CanInlineApiCall: exclude the target (which goes in a register) and
+ // the receiver (which is implicitly counted by CallApiCallbackStub) from the
+ // argument count.
+ int const argc = static_cast<int>(params.arity() - 2);
+ CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+ CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate, zone, cid,
+ cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+ ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ ExternalReference function_reference(
+ &api_function, ExternalReference::DIRECT_API_CALL, isolate);
+
+ // CallApiCallbackStub's register arguments: code, target, call data, holder,
+ // function address.
+ node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(zone, 2, jsgraph()->Constant(data));
+ node->InsertInput(zone, 3, holder_node);
+ node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+}
+
Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
@@ -280,6 +426,8 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
return ReduceFunctionPrototypeApply(node);
case Builtins::kFunctionPrototypeCall:
return ReduceFunctionPrototypeCall(node);
+ case Builtins::kFunctionPrototypeHasInstance:
+ return ReduceFunctionPrototypeHasInstance(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
case Builtins::kObjectPrototypeGetProto:
@@ -292,6 +440,12 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
if (*function == function->native_context()->array_function()) {
return ReduceArrayConstructor(node);
}
+
+ if (shared->IsApiFunction()) {
+ return ReduceCallApiFunction(
+ node, target,
+ handle(FunctionTemplateInfo::cast(shared->function_data())));
+ }
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -302,7 +456,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
isolate());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
ConvertReceiverMode const convert_mode =
- (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
+ (bound_this->IsNullOrUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
size_t arity = p.arity();
@@ -332,26 +486,37 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
return NoChange();
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
- if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(
- DeoptimizeKind::kSoft,
- DeoptimizeReason::kInsufficientTypeFeedbackForCall),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
+ if (nexus.IsUninitialized()) {
+ // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
+ if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
+
+ // Insert a CallIC here to collect feedback for uninitialized calls.
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ Callable callable =
+ CodeFactory::CallICInOptimizedCode(isolate(), p.convert_mode());
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
+ flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Constant(arg_count);
+ Node* slot_index =
+ jsgraph()->Constant(TypeFeedbackVector::GetIndex(p.feedback().slot()));
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, stub_arity);
+ node->InsertInput(graph()->zone(), 3, slot_index);
+ node->InsertInput(graph()->zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
+
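After this rewrite the node follows the CallIC stub calling convention; the
input layout produced by the InsertInput calls above (sketch):

  //  0: CallIC code object      3: feedback slot index
  //  1: original call target    4: feedback vector
  //  2: argument count          5+: receiver, arguments, then context,
  //                                 frame state, effect and control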
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// Retrieve the Array function from the {node}.
@@ -412,7 +577,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
if (!function->IsConstructor()) {
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructedNonConstructable));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 81153f98dc..e39433a020 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -25,8 +25,7 @@ class JSCallReducer final : public AdvancedReducer {
// Flags that control the mode of operation.
enum Flag {
kNoFlags = 0u,
- kBailoutOnUninitialized = 1u << 0,
- kDeoptimizationEnabled = 1u << 1
+ kDeoptimizationEnabled = 1u << 0,
};
typedef base::Flags<Flag> Flags;
@@ -41,13 +40,23 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceCallApiFunction(
+ Node* node, Node* target,
+ Handle<FunctionTemplateInfo> function_template_info);
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceFunctionPrototypeHasInstance(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
+ enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+
+ HolderLookup LookupHolder(Handle<JSObject> object,
+ Handle<FunctionTemplateInfo> function_template_info,
+ Handle<JSObject>* holder);
+
Graph* graph() const;
Flags flags() const { return flags_; }
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index e02fc49de8..9a2edc13e3 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -28,50 +28,81 @@ Reduction JSContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSContextSpecialization::SimplifyJSLoadContext(Node* node,
+ Node* new_context,
+ size_t new_depth) {
+ DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+ const ContextAccess& access = ContextAccessOf(node->op());
+ DCHECK_LE(new_depth, access.depth());
+
+ if (new_depth == access.depth() &&
+ new_context == NodeProperties::GetContextInput(node)) {
+ return NoChange();
+ }
-MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
- Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
- node->opcode() == IrOpcode::kJSStoreContext);
- Node* const object = NodeProperties::GetValueInput(node, 0);
- return NodeProperties::GetSpecializationContext(object, context());
+ const Operator* op = jsgraph_->javascript()->LoadContext(
+ new_depth, access.index(), access.immutable());
+ NodeProperties::ReplaceContextInput(node, new_context);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
}
+Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
+ Node* new_context,
+ size_t new_depth) {
+ DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+ const ContextAccess& access = ContextAccessOf(node->op());
+ DCHECK_LE(new_depth, access.depth());
+
+ if (new_depth == access.depth() &&
+ new_context == NodeProperties::GetContextInput(node)) {
+ return NoChange();
+ }
+
+ const Operator* op =
+ jsgraph_->javascript()->StoreContext(new_depth, access.index());
+ NodeProperties::ReplaceContextInput(node, new_context);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- // Get the specialization context from the node.
- Handle<Context> context;
- if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
- // Find the right parent context.
const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = access.depth(); i > 0; --i) {
- context = handle(context->previous(), isolate());
+ size_t depth = access.depth();
+
+ // First walk up the context chain in the graph as far as possible.
+ Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+ Handle<Context> concrete;
+ if (!NodeProperties::GetSpecializationContext(outer, context())
+ .ToHandle(&concrete)) {
+ // We do not have a concrete context object, so we can only partially reduce
+ // the load by folding in the outer context node.
+ return SimplifyJSLoadContext(node, outer, depth);
+ }
+
+ // Now walk up the concrete context chain for the remaining depth.
+ for (; depth > 0; --depth) {
+ concrete = handle(concrete->previous(), isolate());
}
- // If the access itself is mutable, only fold-in the parent.
if (!access.immutable()) {
- // The access does not have to look up a parent, nothing to fold.
- if (access.depth() == 0) {
- return NoChange();
- }
- const Operator* op = jsgraph_->javascript()->LoadContext(
- 0, access.index(), access.immutable());
- node->ReplaceInput(0, jsgraph_->Constant(context));
- NodeProperties::ChangeOp(node, op);
- return Changed(node);
+ // We found the requested context object but since the context slot is
+ // mutable we can only partially reduce the load.
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
- Handle<Object> value =
- handle(context->get(static_cast<int>(access.index())), isolate());
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot.
- // We must be conservative and check if the value in the slot is currently the
- // hole or undefined. If it is neither of these, then it must be initialized.
+ // We must be conservative and check if the value in the slot is currently
+ // the hole or undefined. Only if it is neither of these, can we be sure that
+ // it won't change anymore.
+ Handle<Object> value(concrete->get(static_cast<int>(access.index())),
+ isolate());
if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
- return NoChange();
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
// Success. The context load can be replaced with the constant.
@@ -86,24 +117,27 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- // Get the specialization context from the node.
- Handle<Context> context;
- if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
- // The access does not have to look up a parent, nothing to fold.
const ContextAccess& access = ContextAccessOf(node->op());
- if (access.depth() == 0) {
- return NoChange();
+ size_t depth = access.depth();
+
+ // First walk up the context chain in the graph until we reduce the depth to 0
+ // or hit a node that does not have a CreateXYZContext operator.
+ Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+ Handle<Context> concrete;
+ if (!NodeProperties::GetSpecializationContext(outer, context())
+ .ToHandle(&concrete)) {
+ // We do not have a concrete context object, so we can only partially reduce
+ // the store by folding in the outer context node.
+ return SimplifyJSStoreContext(node, outer, depth);
}
- // Find the right parent context.
- for (size_t i = access.depth(); i > 0; --i) {
- context = handle(context->previous(), isolate());
+ // Now walk up the concrete context chain for the remaining depth.
+ for (; depth > 0; --depth) {
+ concrete = handle(concrete->previous(), isolate());
}
- node->ReplaceInput(0, jsgraph_->Constant(context));
- NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
- return Changed(node);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
}
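A worked example of the two-phase walk (node names hypothetical):

  // JSLoadContext[depth=2, immutable] with context input
  //   c1 = JSCreateFunctionContext(..., c0)
  // Phase 1: GetOuterContext folds past c1, leaving outer = c0, depth = 1.
  // Phase 2: if c0 maps to a concrete Context, follow previous() once more
  // and embed the initialized slot value (or the context constant);
  // otherwise emit JSLoadContext[depth=1] on c0.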
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index ef784fc442..99172af446 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -30,8 +30,10 @@ class JSContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- // Returns the {Context} to specialize {node} to (if any).
- MaybeHandle<Context> GetSpecializationContext(Node* node);
+ Reduction SimplifyJSStoreContext(Node* node, Node* new_context,
+ size_t new_depth);
+ Reduction SimplifyJSLoadContext(Node* node, Node* new_context,
+ size_t new_depth);
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index c54b76b6cb..9a3cbd7894 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -38,6 +38,7 @@ class AllocationBuilder final {
// Primitive allocation of static size.
void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
Type* type = Type::Any()) {
+ DCHECK_LE(size, kMaxRegularHeapObjectSize);
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
@@ -161,7 +162,9 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
}
}
- } else if (!boilerplate->HasFastDoubleElements()) {
+ } else if (boilerplate->HasFastDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
return false;
}
}
@@ -176,7 +179,8 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -294,46 +298,130 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
if (outer_state->opcode() != IrOpcode::kFrameState) {
switch (type) {
case CreateArgumentsType::kMappedArguments: {
- // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ // TODO(bmeurer): Make deoptimization mandatory for the various
+ // arguments objects, so that we always have a shared_info here.
Handle<SharedFunctionInfo> shared_info;
- if (!state_info.shared_info().ToHandle(&shared_info) ||
- shared_info->has_duplicate_parameters()) {
- return NoChange();
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared_info->has_duplicate_parameters()) return NoChange();
+ // If there is no aliasing, the arguments object elements are not
+ // special in any way, we can just return an unmapped backing store.
+ if (shared_info->internal_formal_parameter_count() == 0) {
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewUnmappedArgumentsElements(0), effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(native_context()->sloppy_arguments_map(), isolate()));
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ a.Allocate(JSSloppyArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), length);
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
+ NodeProperties::ChangeOp(node, new_op);
+ }
+ return Changed(node);
}
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->RemoveInput(3); // Remove the frame state.
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
+ return NoChange();
}
case CreateArgumentsType::kUnmappedArguments: {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, new_op);
+ Handle<SharedFunctionInfo> shared_info;
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewUnmappedArgumentsElements(
+ shared_info->internal_formal_parameter_count()),
+ effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(native_context()->strict_arguments_map(), isolate()));
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ a.Allocate(JSStrictArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ }
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- Callable callable = CodeFactory::FastNewRestParameter(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, new_op);
+ Handle<SharedFunctionInfo> shared_info;
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewRestParameterElements(
+ shared_info->internal_formal_parameter_count()),
+ effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the JSArray object map.
+ Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+ native_context()->js_array_fast_elements_map_index(), isolate()));
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ }
return Changed(node);
}
}
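Note on the three cases above: once the frame state identifies the callee, each one follows the same inline-allocation shape, building the elements backing store first and then allocating a fixed-size arguments object whose header fields are initialized store by store. A minimal standalone sketch of the layout invariant behind the STATIC_ASSERT (TaggedPtr and StrictArgumentsLayout are hypothetical stand-ins, not V8's types):

    // Mirrors the four-slot strict arguments layout: map, properties,
    // elements, length. The sloppy variant above stores a fifth slot, callee.
    struct TaggedPtr { void* value; };

    struct StrictArgumentsLayout {
      TaggedPtr map;         // AccessBuilder::ForMap()
      TaggedPtr properties;  // AccessBuilder::ForJSObjectProperties()
      TaggedPtr elements;    // AccessBuilder::ForJSObjectElements()
      TaggedPtr length;      // AccessBuilder::ForArgumentsLength()
    };

    static_assert(sizeof(StrictArgumentsLayout) == 4 * sizeof(void*),
                  "inline allocation writes exactly four header words");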
@@ -663,17 +751,19 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ if (!FLAG_turbo_lower_create_closure) return NoChange();
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> shared = p.shared_info();
-
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
+
int const function_map_index =
Context::FunctionMapIndex(shared->language_mode(), shared->kind());
Node* function_map = jsgraph()->HeapConstant(
handle(Map::cast(native_context()->get(function_map_index)), isolate()));
+
// Note that it is only safe to embed the raw entry point of the compile
// lazy stub into the code, because that stub is immortal and immovable.
Node* compile_entry = jsgraph()->PointerConstant(
@@ -785,7 +875,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
- int slot_count = OpParameter<int>(node->op());
+ const CreateFunctionContextParameters& parameters =
+ CreateFunctionContextParametersOf(node->op());
+ int slot_count = parameters.slot_count();
+ ScopeType scope_type = parameters.scope_type();
Node* const closure = NodeProperties::GetValueInput(node, 0);
// Use inline allocation for function contexts up to a size limit.
@@ -798,7 +891,18 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
- a.AllocateArray(context_length, factory()->function_context_map());
+ Handle<Map> map;
+ switch (scope_type) {
+ case EVAL_SCOPE:
+ map = factory()->eval_context_map();
+ break;
+ case FUNCTION_SCOPE:
+ map = factory()->function_context_map();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ a.AllocateArray(context_length, map);
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
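The inline-allocated context above is sized as the declared slots plus a fixed header; MIN_CONTEXT_SLOTS == 4 covers closure, previous, extension, and the native context (the last store falls outside this hunk). A standalone sketch of the sizing, with kMinContextSlots as a hypothetical mirror of Context::MIN_CONTEXT_SLOTS:

    constexpr int kMinContextSlots = 4;  // closure, previous, extension,
                                         // native context

    constexpr int ContextLength(int slot_count) {
      return slot_count + kMinContextSlots;
    }

    static_assert(ContextLength(0) == 4, "an empty context is header-only");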
@@ -929,6 +1033,7 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(argument_count, factory()->fixed_array_map());
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
return a.Finish();
@@ -958,6 +1063,7 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(num_elements, factory()->fixed_array_map());
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
return a.Finish();
@@ -987,18 +1093,19 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Prepare an iterator over argument values recorded in the frame state.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
StateValuesAccess parameters_access(parameters);
- auto paratemers_it = ++parameters_access.begin();
+ auto parameters_it = ++parameters_access.begin();
// The unmapped argument values recorded in the frame state are stored yet
// another indirection away and then linked into the parameter map below,
// whereas mapped argument values are replaced with a hole instead.
AllocationBuilder aa(jsgraph(), effect, control);
aa.AllocateArray(argument_count, factory()->fixed_array_map());
- for (int i = 0; i < mapped_count; ++i, ++paratemers_it) {
+ for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
}
- for (int i = mapped_count; i < argument_count; ++i, ++paratemers_it) {
- aa.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
+ for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
Node* arguments = aa.Finish();
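The loop pair above is the essence of aliased (mapped) arguments: the first mapped_count slots alias the enclosing context and are filled with the hole, and only the tail holds real frame values, which is what the new DCHECK_NOT_NULL guards. A hedged standalone sketch, with ints standing in for nodes and -1 for TheHoleConstant:

    #include <cassert>
    #include <vector>

    constexpr int kTheHole = -1;  // stand-in for TheHoleConstant

    std::vector<int> FillAliasedElements(const std::vector<int>& frame_values,
                                         int mapped_count) {
      assert(mapped_count >= 0 &&
             mapped_count <= static_cast<int>(frame_values.size()));
      std::vector<int> elements(frame_values.size());
      for (int i = 0; i < mapped_count; ++i) {
        elements[i] = kTheHole;  // aliased through the parameter map
      }
      for (size_t i = static_cast<size_t>(mapped_count);
           i < frame_values.size(); ++i) {
        elements[i] = frame_values[i];  // must be present, hence the DCHECKs
      }
      return elements;
    }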
@@ -1081,13 +1188,15 @@ Node* JSCreateLowering::AllocateFastLiteral(
for (int i = 0; i < boilerplate_nof; ++i) {
PropertyDetails const property_details =
boilerplate_map->instance_descriptors()->GetDetails(i);
- if (property_details.type() != DATA) continue;
+ if (property_details.location() != kField) continue;
+ DCHECK_EQ(kData, property_details.kind());
Handle<Name> property_name(
boilerplate_map->instance_descriptors()->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
- FieldAccess access = {
- kTaggedBase, index.offset(), property_name,
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, index.offset(),
+ property_name, MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
Node* value;
if (boilerplate->IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
@@ -1104,23 +1213,15 @@ Node* JSCreateLowering::AllocateFastLiteral(
boilerplate_object, site_context);
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
+ double number = Handle<HeapNumber>::cast(boilerplate_value)->value();
// Allocate a mutable HeapNumber box and store the value into it.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- value = effect = graph()->NewNode(
- simplified()->Allocate(pretenure),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), value,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- value, jsgraph()->Constant(
- Handle<HeapNumber>::cast(boilerplate_value)->value()),
- effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), value, effect);
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.Allocate(HeapNumber::kSize, pretenure);
+ builder.Store(AccessBuilder::ForMap(),
+ factory()->mutable_heap_number_map());
+ builder.Store(AccessBuilder::ForHeapNumberValue(),
+ jsgraph()->Constant(number));
+ value = effect = builder.Finish();
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
value = boilerplate_value->IsUninitialized(isolate())
@@ -1156,7 +1257,7 @@ Node* JSCreateLowering::AllocateFastLiteral(
AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
handle(boilerplate_array->length(), isolate()));
}
- for (auto const inobject_field : inobject_fields) {
+ for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
}
return builder.Finish();
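The mutable HeapNumber hunk above trades five hand-written BeginRegion/Allocate/StoreField/FinishRegion nodes for the AllocationBuilder without changing the emitted shape: one raw allocation, then a map word and a float64 payload. A hypothetical machine-level model of that box (bump allocation into a caller-provided, suitably aligned arena; none of this is V8's heap API):

    #include <cstddef>
    #include <cstdint>

    struct HeapNumberBox {
      uintptr_t map;  // mutable_heap_number_map in the code above
      double value;   // AccessBuilder::ForHeapNumberValue()
    };

    HeapNumberBox* AllocateHeapNumber(unsigned char* arena, size_t* top,
                                      uintptr_t map, double number) {
      HeapNumberBox* box = reinterpret_cast<HeapNumberBox*>(arena + *top);
      *top += sizeof(HeapNumberBox);  // builder.Allocate(HeapNumber::kSize)
      box->map = map;                 // builder.Store(ForMap(), ...)
      box->value = number;            // builder.Store(ForHeapNumberValue(), ...)
      return box;
    }

Presumably the builder emits the same not-observable region bracketing internally, since the replacement would otherwise change observable behavior.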
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 55ec1bf41d..73e1b7dd24 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -27,6 +27,9 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
}
Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+ // JSFrameSpecialization should never run on interpreted frames, since the
+ // code below assumes standard stack frame layouts.
+ DCHECK(!frame()->is_interpreted());
DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
Handle<Object> value;
int index = OsrValueIndexOf(node->op());
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 250a9c26f6..ee844e9ee2 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-generic-lowering.h"
#include "src/ast/ast.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
@@ -153,75 +154,37 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
Callable callable =
CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
- node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(1, vector);
- node->ReplaceInput(4, effect);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2, vector);
ReplaceWithStubCall(node, callable, flags);
}
@@ -230,33 +193,20 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* key = NodeProperties::GetValueInput(node, 1);
Node* value = NodeProperties::GetValueInput(node, 2);
- Node* closure = NodeProperties::GetValueInput(node, 3);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
Callable callable =
CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 1);
+ node->InsertInputs(zone(), 0, 2);
node->ReplaceInput(Descriptor::kReceiver, receiver);
node->ReplaceInput(Descriptor::kName, key);
node->ReplaceInput(Descriptor::kValue, value);
node->ReplaceInput(Descriptor::kSlot,
jsgraph()->SmiConstant(p.feedback().index()));
node->ReplaceInput(Descriptor::kVector, vector);
- node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
@@ -264,39 +214,25 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
- Node* closure = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable =
CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 2);
+ node->InsertInputs(zone(), 0, 3);
node->ReplaceInput(Descriptor::kReceiver, receiver);
node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
node->ReplaceInput(Descriptor::kValue, value);
node->ReplaceInput(Descriptor::kSlot,
jsgraph()->SmiConstant(p.feedback().index()));
node->ReplaceInput(Descriptor::kVector, vector);
- node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
- Node* closure = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -304,16 +240,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Callable callable =
CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
// Load global object from the context.
Node* native_context = effect =
graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
@@ -325,7 +252,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
effect, control);
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 3);
+ node->InsertInputs(zone(), 0, 4);
node->ReplaceInput(Descriptor::kReceiver, global);
node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
node->ReplaceInput(Descriptor::kValue, value);
@@ -336,6 +263,13 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
+ DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+ node->InsertInputs(zone(), 4, 2);
+ node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
+}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
LanguageMode language_mode = OpParameter<LanguageMode>(node);
@@ -344,6 +278,11 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
: Runtime::kDeleteProperty_Sloppy);
}
+void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::GetSuperConstructor(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -358,40 +297,12 @@ void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
}
void JSGenericLowering::LowerJSLoadContext(Node* node) {
- const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
- NodeProperties::GetValueInput(node, 0),
- jsgraph()->Int32Constant(
- Context::SlotOffset(Context::PREVIOUS_INDEX)),
- NodeProperties::GetEffectInput(node),
- graph()->start()));
- }
- node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
- static_cast<int>(access.index()))));
- node->AppendInput(zone(), graph()->start());
- NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSStoreContext(Node* node) {
- const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
- NodeProperties::GetValueInput(node, 0),
- jsgraph()->Int32Constant(
- Context::SlotOffset(Context::PREVIOUS_INDEX)),
- NodeProperties::GetEffectInput(node),
- graph()->start()));
- }
- node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
- node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
- static_cast<int>(access.index()))));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier)));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
@@ -438,11 +349,18 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- // Use the FastNewClosureStub only for functions allocated in new space.
+ // Use the FastNewClosure builtin only for functions allocated in new
+ // space.
if (p.pretenure() == NOT_TENURED) {
Callable callable = CodeFactory::FastNewClosure(isolate());
+ node->InsertInput(zone(), 1,
+ jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable, flags);
} else {
+ node->InsertInput(zone(), 1,
+ jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
? Runtime::kNewClosure_Tenured
: Runtime::kNewClosure);
@@ -451,14 +369,20 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
- int const slot_count = OpParameter<int>(node->op());
+ const CreateFunctionContextParameters& parameters =
+ CreateFunctionContextParametersOf(node->op());
+ int slot_count = parameters.slot_count();
+ ScopeType scope_type = parameters.scope_type();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
- Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+ if (slot_count <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable =
+ CodeFactory::FastNewFunctionContext(isolate(), scope_type);
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
ReplaceWithStubCall(node, callable, flags);
} else {
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(scope_type));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
}
}
@@ -478,11 +402,13 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
- // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
- // initial length limit for arrays with "fast" elements kind.
+ // Use the FastCloneShallowArray builtin only for shallow boilerplates
+ // without properties, up to the number of elements the builtin can handle.
if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
- p.length() < JSArray::kInitialMaxFastElementArray) {
- Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+ p.length() <
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements) {
+ Callable callable = CodeFactory::FastCloneShallowArray(
+ isolate(), DONT_TRACK_ALLOCATION_SITE);
ReplaceWithStubCall(node, callable, flags);
} else {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -498,10 +424,11 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- // Use the FastCloneShallowObjectStub only for shallow boilerplates without
- // elements up to the number of properties that the stubs can handle.
+ // Use the FastCloneShallowObject builtin only for shallow boilerplates
+ // without elements, up to the number of properties the builtin can handle.
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
- p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ p.length() <=
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties) {
Callable callable =
CodeFactory::FastCloneShallowObject(isolate(), p.length());
ReplaceWithStubCall(node, callable, flags);
@@ -574,6 +501,12 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCallConstructWithSpread(Node* node) {
+ CallConstructWithSpreadParameters const& p =
+ CallConstructWithSpreadParametersOf(node->op());
+ ReplaceWithRuntimeCall(node, Runtime::kNewWithSpread,
+ static_cast<int>(p.arity()));
+}
void JSGenericLowering::LowerJSCallFunction(Node* node) {
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
@@ -613,24 +546,12 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
- ExternalReference message_address =
- ExternalReference::address_of_pending_message_obj(isolate());
- node->RemoveInput(NodeProperties::FirstContextIndex(node));
- node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
- node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSStoreMessage(Node* node) {
- ExternalReference message_address =
- ExternalReference::address_of_pending_message_obj(isolate());
- node->RemoveInput(NodeProperties::FirstContextIndex(node));
- node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
- node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- StoreRepresentation representation(MachineRepresentation::kTagged,
- kNoWriteBarrier);
- NodeProperties::ChangeOp(node, machine()->Store(representation));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
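Common to all of the lowerings rewritten above: the feedback vector used to be fetched at run time through two dependent loads (closure -> literals -> feedback vector); since the vector is now known statically from the feedback parameter, it is embedded as a HeapConstant and the whole chain folds away, which is also why the explicit effect rewiring disappears. A hypothetical before/after model:

    struct FeedbackVector { int slots[8]; };

    // Before: the vector is reached via two dependent loads per IC site.
    int LoadSlotDynamic(FeedbackVector*** closure, int index) {
      FeedbackVector** literals = *closure;  // closure->literals
      FeedbackVector* vector = *literals;    // literals->feedback_vector
      return vector->slots[index];
    }

    // After: the vector is a compile-time constant; no loads remain.
    int LoadSlotConstant(const FeedbackVector* known_vector, int index) {
      return known_vector->slots[index];
    }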
void JSGenericLowering::LowerJSLoadModule(Node* node) {
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index e9ff060dd8..2fe5cabc22 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -49,7 +49,8 @@ Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
namespace {
FieldAccess ForPropertyCellValue(MachineRepresentation representation,
- Type* type, Handle<Name> name) {
+ Type* type, MaybeHandle<Map> map,
+ Handle<Name> name) {
WriteBarrierKind kind = kFullWriteBarrier;
if (representation == MachineRepresentation::kTaggedSigned) {
kind = kNoWriteBarrier;
@@ -57,8 +58,8 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
kind = kPointerWriteBarrier;
}
MachineType r = MachineType::TypeForRepresentation(representation);
- FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
- kind};
+ FieldAccess access = {
+ kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
return access;
}
} // namespace
@@ -76,7 +77,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
Node* context = jsgraph()->HeapConstant(result.context);
Node* value = effect = graph()->NewNode(
javascript()->LoadContext(0, result.index, result.immutable), context,
- context, effect);
+ effect);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -115,6 +116,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
}
// Load from constant type cell can benefit from type feedback.
+ MaybeHandle<Map> map;
Type* property_cell_value_type = Type::NonInternal();
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_details.cell_type() == PropertyCellType::kConstantType) {
@@ -126,18 +128,24 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
property_cell_value_type = Type::Number();
representation = MachineRepresentation::kTaggedPointer;
} else {
- // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
- // below and use it in LoadElimination to eliminate map checks.
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
property_cell_value_type = Type::For(property_cell_value_map);
representation = MachineRepresentation::kTaggedPointer;
+
+ // We can only use the property cell value map for map check elimination
+ // if it's stable, i.e. the HeapObject wasn't mutated without the cell
+ // state being updated.
+ if (property_cell_value_map->is_stable()) {
+ dependencies()->AssumeMapStable(property_cell_value_map);
+ map = property_cell_value_map;
+ }
}
}
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
- representation, property_cell_value_type, name)),
- jsgraph()->HeapConstant(property_cell), effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(ForPropertyCellValue(
+ representation, property_cell_value_type, map, name)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
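The stability protocol in the hunk above: a HeapObject's map may only be embedded for later map-check elimination if it is stable, and embedding it must register a code dependency so the optimized code is thrown away if the map ever transitions. A hypothetical standalone model (MapModel and the validity flags are illustrative, not V8's dependency machinery):

    #include <vector>

    struct MapModel {
      bool is_stable = true;
      std::vector<bool*> dependent_code;  // validity flags of optimized code
    };

    // Mirrors the is_stable() check plus AssumeMapStable(...) above.
    bool TryEmbedMap(MapModel* map, bool* code_is_valid) {
      if (!map->is_stable) return false;  // unsafe to rely on
      map->dependent_code.push_back(code_is_valid);
      return true;
    }

    // Any transition breaks stability and invalidates dependent code.
    void TransitionMap(MapModel* map) {
      map->is_stable = false;
      for (bool* valid : map->dependent_code) *valid = false;  // deoptimize
    }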
@@ -157,7 +165,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
if (result.immutable) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
- context, value, context, effect, control);
+ value, context, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -206,9 +214,11 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
value, effect, control);
// Check {value} map against the {property_cell} map.
- effect = graph()->NewNode(
- simplified()->CheckMaps(1), value,
- jsgraph()->HeapConstant(property_cell_value_map), effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(property_cell_value_map)),
+ value, effect, control);
property_cell_value_type = Type::OtherInternal();
representation = MachineRepresentation::kTaggedPointer;
} else {
@@ -218,24 +228,21 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
}
- effect = graph()->NewNode(
- simplified()->StoreField(ForPropertyCellValue(
- representation, property_cell_value_type, name)),
- jsgraph()->HeapConstant(property_cell), value, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
+ representation, property_cell_value_type,
+ MaybeHandle<Map>(), name)),
+ jsgraph()->HeapConstant(property_cell), value,
+ effect, control);
break;
}
case PropertyCellType::kMutable: {
- // Store to non-configurable, data property on the global can be lowered
- // to a field store, even without recording a code dependency on the cell,
- // because the property cannot be deleted or reconfigured to an accessor
- // or interceptor property.
- if (property_details.IsConfigurable()) {
- // Protect lowering by recording a code dependency on the cell.
- dependencies()->AssumePropertyCell(property_cell);
- }
+ // Record a code dependency on the cell, and just deoptimize if the
+ // property ever becomes read-only.
+ dependencies()->AssumePropertyCell(property_cell);
effect = graph()->NewNode(
simplified()->StoreField(ForPropertyCellValue(
- MachineRepresentation::kTagged, Type::NonInternal(), name)),
+ MachineRepresentation::kTagged, Type::NonInternal(),
+ MaybeHandle<Map>(), name)),
jsgraph()->HeapConstant(property_cell), value, effect, control);
break;
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 8626cd1821..1fa7861d49 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -31,11 +31,26 @@ Node* JSGraph::ToNumberBuiltinConstant() {
Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
ArgvMode argv_mode, bool builtin_exit_frame) {
- if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
- result_size == 1) {
+ if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack) {
+ DCHECK(result_size >= 1 && result_size <= 3);
+ if (!builtin_exit_frame) {
+ CachedNode key;
+ if (result_size == 1) {
+ key = kCEntryStub1Constant;
+ } else if (result_size == 2) {
+ key = kCEntryStub2Constant;
+ } else {
+ DCHECK(result_size == 3);
+ key = kCEntryStub3Constant;
+ }
+ return CACHED(
+ key, HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+ argv_mode, builtin_exit_frame)
+ .GetCode()));
+ }
CachedNode key = builtin_exit_frame
- ? kCEntryStubWithBuiltinExitFrameConstant
- : kCEntryStubConstant;
+ ? kCEntryStub1WithBuiltinExitFrameConstant
+ : kCEntryStub1Constant;
return CACHED(key,
HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
argv_mode, builtin_exit_frame)
@@ -264,7 +279,8 @@ Node* JSGraph::ExternalConstant(Runtime::FunctionId function_id) {
}
Node* JSGraph::EmptyStateValues() {
- return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
+ return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(
+ 0, SparseInputMask::Dense())));
}
Node* JSGraph::Dead() {
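The first js-graph.cc hunk above widens the CEntryStub constant cache from a single slot to one per result size (1 to 3, per the DCHECK), keeping a separate slot for the builtin-exit-frame variant. A hedged sketch of just the key selection (the enum mirrors the new js-graph.h entries below):

    #include <cassert>

    enum CachedNode {
      kCEntryStub1Constant,
      kCEntryStub2Constant,
      kCEntryStub3Constant,
      kCEntryStub1WithBuiltinExitFrameConstant,
    };

    CachedNode CEntryCacheKey(int result_size, bool builtin_exit_frame) {
      if (builtin_exit_frame) {
        // Only the single-result variant is cached for builtin exit frames.
        return kCEntryStub1WithBuiltinExitFrameConstant;
      }
      assert(result_size >= 1 && result_size <= 3);
      switch (result_size) {
        case 1: return kCEntryStub1Constant;
        case 2: return kCEntryStub2Constant;
        default: return kCEntryStub3Constant;
      }
    }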
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index c2c0c77f42..e10591998c 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -162,8 +162,10 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
kToNumberBuiltinConstant,
- kCEntryStubConstant,
- kCEntryStubWithBuiltinExitFrameConstant,
+ kCEntryStub1Constant,
+ kCEntryStub2Constant,
+ kCEntryStub3Constant,
+ kCEntryStub1WithBuiltinExitFrameConstant,
kEmptyFixedArrayConstant,
kEmptyLiteralsArrayConstant,
kEmptyStringConstant,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index d6229c2d64..672d322a24 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -46,8 +46,8 @@ bool CanInlineFunction(Handle<JSFunction> function) {
// Built-in functions are handled by the JSBuiltinReducer.
if (function->shared()->HasBuiltinFunctionId()) return false;
- // Don't inline builtins.
- if (function->shared()->IsBuiltin()) return false;
+ // Only choose user code for inlining.
+ if (!function->shared()->IsUserJavaScript()) return false;
// Quick check on the size of the AST to avoid parsing large candidates.
if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 0e122a6c14..1717d4118a 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -4,25 +4,21 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast/ast-numbering.h"
#include "src/ast/ast.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
-#include "src/compiler/ast-graph-builder.h"
-#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
@@ -235,14 +231,14 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
NodeVector params(local_zone_);
for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
params.push_back(node->InputAt(1 + parameter));
}
- const Operator* op_param =
- common()->StateValues(static_cast<int>(params.size()));
+ const Operator* op_param = common()->StateValues(
+ static_cast<int>(params.size()), SparseInputMask::Dense());
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
return graph()->NewNode(op, params_node, node0, node0,
@@ -273,7 +269,7 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
return graph()->NewNode(op, node0, node0, node0,
jsgraph()->UndefinedConstant(), function,
@@ -311,11 +307,10 @@ bool NeedsConvertReceiver(Node* receiver, Node* effect) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
// Check if all maps have the given {instance_type}.
- for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
- HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
- if (!m.HasValue()) return true;
- Handle<Map> const map = Handle<Map>::cast(m.Value());
- if (!map->IsJSReceiverMap()) return true;
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (!maps[i]->IsJSReceiverMap()) return true;
}
return false;
}
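With the maps carried in the operator's CheckMapsParameters instead of extra value inputs, the receiver test above reduces to a plain walk over a small set. A hypothetical mirror of that loop (MapRef is a stand-in for Handle<Map>):

    #include <vector>

    struct MapRef { bool is_js_receiver; };

    // True when a receiver conversion is still needed, i.e. some map checked
    // by the dominating CheckMaps is not a JSReceiver map.
    bool NeedsConvertReceiver(const std::vector<MapRef>& maps) {
      for (const MapRef& map : maps) {
        if (!map.is_js_receiver) return true;
      }
      return false;
    }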
@@ -385,6 +380,14 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
JSCallAccessor call(node);
Handle<SharedFunctionInfo> shared_info(function->shared());
+ // Inlining is only supported in the bytecode pipeline.
+ if (!info_->is_optimizing_from_bytecode()) {
+ TRACE("Inlining %s into %s is not supported in the deprecated pipeline\n",
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
// Function must be inlineable.
if (!shared_info->IsInlineable()) {
TRACE("Not inlining %s into %s because callee is not inlineable\n",
@@ -486,12 +489,11 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
Zone zone(info_->isolate()->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, shared_info);
- CompilationInfo info(&parse_info, function);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
- if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
- if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
+ info.MarkAsOptimizeFromBytecode();
- if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+ if (!Compiler::EnsureBytecode(&info)) {
TRACE("Not inlining %s into %s because bytecode generation failed\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -501,25 +503,6 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- if (!info.is_optimizing_from_bytecode() &&
- !Compiler::ParseAndAnalyze(info.parse_info())) {
- TRACE("Not inlining %s into %s because parsing failed\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- if (info_->isolate()->has_pending_exception()) {
- info_->isolate()->clear_pending_exception();
- }
- return NoChange();
- }
-
- if (!info.is_optimizing_from_bytecode() &&
- !Compiler::EnsureDeoptimizationSupport(&info)) {
- TRACE("Not inlining %s into %s because deoptimization support failed\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
@@ -540,33 +523,13 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Create the subgraph for the inlinee.
Node* start;
Node* end;
- if (info.is_optimizing_from_bytecode()) {
+ {
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
- BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
- call.frequency(), source_positions_,
- inlining_id);
- graph_builder.CreateGraph(false);
-
- // Extract the inlinee start/end nodes.
- start = graph()->start();
- end = graph()->end();
- } else {
- // Run the loop assignment analyzer on the inlinee.
- AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
- LoopAssignmentAnalysis* loop_assignment =
- loop_assignment_analyzer.Analyze();
-
- // Run the type hint analyzer on the inlinee.
- TypeHintAnalyzer type_hint_analyzer(&zone);
- TypeHintAnalysis* type_hint_analysis =
- type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
-
- // Run the AstGraphBuilder to create the subgraph.
- Graph::SubgraphScope scope(graph());
- AstGraphBuilderWithPositions graph_builder(
- &zone, &info, jsgraph(), call.frequency(), loop_assignment,
- type_hint_analysis, source_positions_, inlining_id);
+ BytecodeGraphBuilder graph_builder(
+ &zone, shared_info, handle(function->feedback_vector()),
+ BailoutId::None(), jsgraph(), call.frequency(), source_positions_,
+ inlining_id);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 52903232d7..2a7a3a3896 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -32,6 +32,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
+ case Runtime::kInlineDebugIsActive:
+ return ReduceDebugIsActive(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineGeneratorClose:
@@ -40,12 +42,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGeneratorGetInputOrDebugPos(node);
case Runtime::kInlineGeneratorGetResumeMode:
return ReduceGeneratorGetResumeMode(node);
+ case Runtime::kInlineGeneratorGetContext:
+ return ReduceGeneratorGetContext(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
- case Runtime::kInlineIsRegExp:
- return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -70,8 +72,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
- case Runtime::kInlineNewObject:
- return ReduceNewObject(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
default:
@@ -90,6 +90,15 @@ Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
context, effect);
}
+Reduction JSIntrinsicLowering::ReduceDebugIsActive(Node* node) {
+ Node* const value = jsgraph()->ExternalConstant(
+ ExternalReference::debug_is_active_address(isolate()));
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForExternalUint8Value());
+ return Change(node, op, value, effect, control);
+}
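ReduceDebugIsActive above lowers the %DebugIsActive intrinsic to a byte load from a fixed external address rather than a runtime call. A hypothetical standalone model (the global flag stands in for the isolate-owned debug-is-active byte):

    #include <cstdint>

    uint8_t debug_is_active = 0;  // stand-in for the isolate's flag byte

    // LoadField(ForExternalUint8Value()) on the external constant amounts to
    // dereferencing a known address.
    bool DebugIsActive() { return debug_is_active != 0; }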
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
@@ -133,6 +142,16 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+ return Change(node, op, generator, effect, control);
+}
+
Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -277,10 +296,6 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
- return Change(node, CodeFactory::FastNewObject(isolate()), 0);
-}
-
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
Node* active_function = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 6e984ff496..2bc7cafa3d 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -40,8 +40,10 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
private:
Reduction ReduceCreateIterResultObject(Node* node);
+ Reduction ReduceDebugIsActive(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceGeneratorClose(Node* node);
+ Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
@@ -57,7 +59,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
- Reduction ReduceNewObject(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
Reduction Change(Node* node, const Operator* op);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index a849fec5aa..4ea15c10a2 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -71,6 +71,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSInstanceOf:
return ReduceJSInstanceOf(node);
+ case IrOpcode::kJSOrdinaryHasInstance:
+ return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadNamed:
@@ -81,6 +83,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ return ReduceJSStoreDataPropertyInLiteral(node);
default:
break;
}
@@ -125,15 +129,16 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
// Monomorphic property access.
- effect =
- BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+ effect = BuildCheckMaps(constructor, effect, control,
+ access_info.receiver_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
NodeProperties::ReplaceValueInput(node, object, 1);
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- return Changed(node);
+ Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
} else if (access_info.IsDataConstant()) {
DCHECK(access_info.constant()->IsCallable());
@@ -145,8 +150,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
// Monomorphic property access.
- effect =
- BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+ effect = BuildCheckMaps(constructor, effect, control,
+ access_info.receiver_maps());
// Call the @@hasInstance handler.
Node* target = jsgraph()->Constant(access_info.constant());
@@ -174,6 +179,31 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Node* object = NodeProperties::GetValueInput(node, 1);
+
+ // Check if the {constructor} is a JSBoundFunction.
+ HeapObjectMatcher m(constructor);
+ if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+ // OrdinaryHasInstance on bound functions turns into a recursive
+ // invocation of the instanceof operator again.
+ // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(function->bound_target_function());
+ NodeProperties::ReplaceValueInput(node, object, 0);
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->HeapConstant(bound_target_function), 1);
+ NodeProperties::ChangeOp(node, javascript()->InstanceOf());
+ Reduction const reduction = ReduceJSInstanceOf(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ return NoChange();
+}
+
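The new reduction above applies ES6 section 7.3.19 step 2 at compile time: OrdinaryHasInstance on a bound function unwraps to an instanceof check against the bound target. A simplified standalone model (ObjectModel/FunctionModel are illustrative; proxies and non-callable targets are ignored):

    struct ObjectModel { const ObjectModel* proto = nullptr; };

    struct FunctionModel : ObjectModel {
      const FunctionModel* bound_target = nullptr;  // set for bound functions
      const ObjectModel* prototype = nullptr;
    };

    bool OrdinaryHasInstance(const FunctionModel* c, const ObjectModel* o) {
      if (c->bound_target != nullptr) {
        return OrdinaryHasInstance(c->bound_target, o);  // step 2: unwrap
      }
      for (const ObjectModel* p = o->proto; p != nullptr; p = p->proto) {
        if (p == c->prototype) return true;  // ordinary prototype walk
      }
      return false;
    }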
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -217,7 +247,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// TODO(turbofan): Add support for inlining into try blocks.
bool is_exceptional = NodeProperties::IsExceptionalCall(node);
- for (auto access_info : access_infos) {
+ for (const auto& access_info : access_infos) {
if (access_info.IsAccessorConstant()) {
// Accessor in try-blocks are not supported yet.
if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
@@ -260,8 +290,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiver, effect, control);
} else {
// Monomorphic property access.
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
effect = BuildCheckMaps(receiver, effect, control,
access_info.receiver_maps());
}
@@ -299,8 +328,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -547,12 +575,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
length, effect, control);
- // Load the character from the {receiver}.
- value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+ // Return the character from the {receiver} as single character string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
control);
-
- // Return it as a single character string.
- value = graph()->NewNode(simplified()->StringFromCharCode(), value);
} else {
// Retrieve the native context from the given {node}.
// Compute element access infos for the receiver maps.
@@ -609,8 +634,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -621,13 +645,13 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Handle<Map> const transition_source = transition.first;
Handle<Map> const transition_target = transition.second;
effect = graph()->NewNode(
- simplified()->TransitionElementsKind(
+ simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source->elements_kind(),
transition_target->elements_kind())
? ElementsTransition::kFastTransition
- : ElementsTransition::kSlowTransition),
- receiver, jsgraph()->HeapConstant(transition_source),
- jsgraph()->HeapConstant(transition_target), effect, control);
+ : ElementsTransition::kSlowTransition,
+ transition_source, transition_target)),
+ receiver, effect, control);
}
// TODO(turbofan): The effect/control linearization will not find a
@@ -672,14 +696,13 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Handle<Map> const transition_target = transition.second;
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(
- IsSimpleMapChangeTransition(
- transition_source->elements_kind(),
- transition_target->elements_kind())
- ? ElementsTransition::kFastTransition
- : ElementsTransition::kSlowTransition),
- receiver, jsgraph()->HeapConstant(transition_source),
- jsgraph()->HeapConstant(transition_target), this_effect,
- this_control);
+ ElementsTransition(IsSimpleMapChangeTransition(
+ transition_source->elements_kind(),
+ transition_target->elements_kind())
+ ? ElementsTransition::kFastTransition
+ : ElementsTransition::kSlowTransition,
+ transition_source, transition_target)),
+ receiver, this_effect, this_control);
}
// Load the {receiver} map.
@@ -806,12 +829,9 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
length, effect, control);
- // Load the character from the {receiver}.
- value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- index, control);
-
- // Return it as a single character string.
- value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+ // Return the character from the {receiver} as single character string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+ control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
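The two TransitionElementsKind hunks above move the source map, the target map, and the fast/slow decision out of the node's value inputs and into the operator itself, leaving the receiver as the only input. A hypothetical sketch of the packed parameter (MapData is a stand-in for Map):

    enum class TransitionMode { kFastTransition, kSlowTransition };

    struct MapData { int elements_kind; };

    struct ElementsTransitionParams {
      TransitionMode mode;
      const MapData* source;
      const MapData* target;
    };

    // Mirrors the IsSimpleMapChangeTransition choice above (assumption:
    // "simple" means only the elements kind differs between the maps).
    ElementsTransitionParams MakeTransition(const MapData* source,
                                            const MapData* target,
                                            bool is_simple_map_change) {
      return {is_simple_map_change ? TransitionMode::kFastTransition
                                   : TransitionMode::kSlowTransition,
              source, target};
    }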
@@ -948,6 +968,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
AssumePrototypesStable(access_info.receiver_maps(), holder);
}
@@ -981,7 +1002,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
common()->FrameState(BailoutId::None(),
OutputFrameStateCombine::Ignore(),
frame_info0),
- graph()->NewNode(common()->StateValues(1), receiver),
+ graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+ receiver),
jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
context, target, frame_state);
@@ -998,16 +1020,16 @@ JSNativeContextSpecialization::BuildPropertyAccess(
Handle<FunctionTemplateInfo> function_template_info(
Handle<FunctionTemplateInfo>::cast(access_info.constant()));
DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ZoneVector<Node*> stack_parameters(graph()->zone());
ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, &stack_parameters,
- effect, control, shared_info, function_template_info);
+ receiver, context, target, frame_state0, nullptr, effect, control,
+ shared_info, function_template_info);
value = value_effect_control.value();
effect = value_effect_control.effect();
control = value_effect_control.control();
}
break;
}
+ case AccessMode::kStoreInLiteral:
case AccessMode::kStore: {
// We need a FrameState for the setter stub to restore the correct
// context and return the appropriate value to fullcodegen.
@@ -1018,7 +1040,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
common()->FrameState(BailoutId::None(),
OutputFrameStateCombine::Ignore(),
frame_info0),
- graph()->NewNode(common()->StateValues(2), receiver, value),
+ graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+ receiver, value),
jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
context, target, frame_state);
@@ -1035,11 +1058,9 @@ JSNativeContextSpecialization::BuildPropertyAccess(
Handle<FunctionTemplateInfo> function_template_info(
Handle<FunctionTemplateInfo>::cast(access_info.constant()));
DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ZoneVector<Node*> stack_parameters(graph()->zone());
- stack_parameters.push_back(value);
ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, &stack_parameters,
- effect, control, shared_info, function_template_info);
+ receiver, context, target, frame_state0, value, effect, control,
+ shared_info, function_template_info);
value = value_effect_control.value();
effect = value_effect_control.effect();
control = value_effect_control.control();
@@ -1059,12 +1080,21 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Optimize immutable property loads.
HeapObjectMatcher m(receiver);
if (m.HasValue() && m.Value()->IsJSObject()) {
+ // TODO(ishell): Use something simpler like
+ //
+ // Handle<Object> value =
+ // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+ // Representation::Tagged(), field_index);
+ //
+ // here, once we have the immutable bit in the access_info.
+
// TODO(turbofan): Given that we already have the field_index here, we
// might be smarter in the future and not rely on the LookupIterator,
// but for now let's just do what Crankshaft does.
LookupIterator it(m.Value(), name,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+ if (it.state() == LookupIterator::DATA && it.IsReadOnly() &&
+ !it.IsConfigurable()) {
Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
return ValueEffectControl(value, effect, control);
}
@@ -1080,6 +1110,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier};
@@ -1090,6 +1121,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
FieldAccess const storage_access = {kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -1099,9 +1131,18 @@ JSNativeContextSpecialization::BuildPropertyAccess(
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
+ } else if (field_representation ==
+ MachineRepresentation::kTaggedPointer) {
+ // Remember the map of the field value, if its map is stable. This is
+ // used by the LoadElimination to eliminate map checks on the result.
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ if (field_map->is_stable()) {
+ dependencies()->AssumeMapStable(field_map);
+ field_access.map = field_map;
+ }
+ }
}
- // TODO(turbofan): Track the field_map (if any) on the {field_access} and
- // use it in LoadElimination to eliminate map checks.
value = effect = graph()->NewNode(simplified()->LoadField(field_access),
storage, effect, control);
} else {
@@ -1138,6 +1179,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
FieldAccess const storage_access = {kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -1159,14 +1201,14 @@ JSNativeContextSpecialization::BuildPropertyAccess(
}
case MachineRepresentation::kTaggedPointer: {
// Ensure that {value} is a HeapObject.
- value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- value, effect, control);
+ value = BuildCheckHeapObject(value, &effect, control);
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
// Emit a map check for the value.
- effect = graph()->NewNode(simplified()->CheckMaps(1), value,
- jsgraph()->HeapConstant(field_map),
- effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map)),
+ value, effect, control);
}
field_access.write_barrier_kind = kPointerWriteBarrier;
break;
@@ -1226,6 +1268,79 @@ JSNativeContextSpecialization::BuildPropertyAccess(
return ValueEffectControl(value, effect, control);
}
+Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+
+ if (!p.feedback().IsValid()) return NoChange();
+
+ StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
+ p.feedback().slot());
+ if (nexus.IsUninitialized()) {
+ return NoChange();
+ }
+
+ if (nexus.ic_state() == MEGAMORPHIC) {
+ return NoChange();
+ }
+
+ DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
+
+ Handle<Map> receiver_map(nexus.FindFirstMap(), isolate());
+ Handle<Name> cached_name =
+ handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
+
+ PropertyAccessInfo access_info;
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ receiver_map, cached_name, AccessMode::kStoreInLiteral,
+ &access_info)) {
+ return NoChange();
+ }
+
+ if (access_info.IsGeneric()) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Monomorphic property access.
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
+
+ effect =
+ BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+
+ // Ensure that {name} matches the cached name.
+ Node* name = NodeProperties::GetValueInput(node, 1);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
+ jsgraph()->HeapConstant(cached_name));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+
+ // Generate the actual property access.
+ ValueEffectControl continuation = BuildPropertyAccess(
+ receiver, value, context, frame_state_lazy, effect, control, cached_name,
+ access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY,
+ p.feedback().vector(), p.feedback().slot());
+ value = continuation.value();
+ effect = continuation.effect();
+ control = continuation.control();
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
namespace {
ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
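The cached-name guard in the ReduceJSStoreDataPropertyInLiteral reducer above relies on the CheckIf idiom, which deoptimizes rather than branching when its condition fails. A minimal sketch of that pattern, with the reducer's names and with {effect}/{control} assumed to be in scope:

    // Sketch only: CheckIf keeps the fast path straight-line. ReferenceEqual
    // compares the incoming property name against the name recorded in the
    // feedback vector; if they ever differ at runtime, CheckIf deoptimizes
    // back to the generic store path instead of branching here.
    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
                                   jsgraph()->HeapConstant(cached_name));
    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);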
@@ -1249,42 +1364,79 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
ElementAccessInfo const& access_info, AccessMode access_mode,
KeyedAccessStoreMode store_mode) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
MapList const& receiver_maps = access_info.receiver_maps();
- // Load the elements for the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
-
- // Don't try to store to a copy-on-write backing store.
- if (access_mode == AccessMode::kStore &&
- IsFastSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- effect =
- graph()->NewNode(simplified()->CheckMaps(1), elements,
- jsgraph()->FixedArrayMapConstant(), effect, control);
- }
-
if (IsFixedTypedArrayElementsKind(elements_kind)) {
- // Load the {receiver}s length.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
- receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ Node* buffer;
+ Node* length;
+ Node* base_pointer;
+ Node* external_pointer;
+
+ // Check if we can constant-fold information about the {receiver} (e.g.
+ // for asm.js-like code patterns).
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value()->IsJSTypedArray()) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
+
+ // Determine the {receiver}s (known) length.
+ length = jsgraph()->Constant(typed_array->length_value());
+
+ // Check if the {receiver}s buffer was neutered.
+ buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
+
+ // Load the (known) base and external pointer for the {receiver}. The
+ // {external_pointer} might be invalid if the {buffer} was neutered, so
+ // we need to make sure that any access is properly guarded.
+ base_pointer = jsgraph()->ZeroConstant();
+ external_pointer = jsgraph()->PointerConstant(
+ FixedTypedArrayBase::cast(typed_array->elements())
+ ->external_pointer());
+ } else {
+ // Load the {receiver}s length.
+ length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+ receiver, effect, control);
+
+ // Load the buffer for the {receiver}.
+ buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+
+ // Load the base and external pointer for the {receiver}s {elements}.
+ base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, effect, control);
+ external_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+ elements, effect, control);
+ }
- // Default to zero if the {receiver}s buffer was neutered.
- length = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), length);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Default to zero if the {receiver}s buffer was neutered.
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ length = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), length);
+ }
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Check that the {index} is a valid array index, we do the actual
@@ -1300,16 +1452,6 @@ JSNativeContextSpecialization::BuildElementAccess(
length, effect, control);
}
- // Load the base and external pointer for the {receiver}.
- Node* base_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, effect, control);
- Node* external_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
- elements, effect, control);
-
// Access the actual element.
ExternalArrayType external_array_type =
GetArrayTypeFromElementsKind(elements_kind);
@@ -1320,6 +1462,9 @@ JSNativeContextSpecialization::BuildElementAccess(
base_pointer, external_pointer, index, effect, control);
break;
}
+ case AccessMode::kStoreInLiteral:
+ UNREACHABLE();
+ break;
case AccessMode::kStore: {
// Ensure that the {value} is actually a Number.
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1369,6 +1514,22 @@ JSNativeContextSpecialization::BuildElementAccess(
}
}
} else {
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ // Don't try to store to a copy-on-write backing store.
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind) &&
+ store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ elements, effect, control);
+ }
+
// Check if the {receiver} is a JSArray.
bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
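The neutering hunk above trades a per-access runtime check for a global invariant. A hedged sketch of the protector-cell pattern, using the same names as the hunk and assuming the surrounding reducer state:

    // Sketch: a protector is a PropertyCell that stays valid as long as no
    // ArrayBuffer has ever been neutered. Registering it as a compilation
    // dependency lets the generated code omit the check entirely; neutering
    // any buffer later invalidates the cell and deoptimizes dependent code.
    if (isolate()->IsArrayBufferNeuteringIntact()) {
      dependencies()->AssumePropertyCell(
          factory()->array_buffer_neutering_protector());
    } else {
      // Otherwise keep the dynamic check: a neutered buffer reports length 0,
      // so every subsequent bounds check against {length} fails safely.
      Node* check = effect = graph()->NewNode(
          simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
      length = graph()->NewNode(
          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
          check, jsgraph()->ZeroConstant(), length);
    }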
@@ -1500,25 +1661,25 @@ JSNativeContextSpecialization::BuildElementAccess(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state,
- ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
- Handle<SharedFunctionInfo> shared_info,
+ Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
+ Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info) {
Handle<CallHandlerInfo> call_handler_info = handle(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
Handle<Object> call_data_object(call_handler_info->data(), isolate());
+ // Only setters have a value.
+ int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
CallApiCallbackStub stub(
- isolate(), static_cast<int>(stack_parameters->size()),
- call_data_object->IsUndefined(isolate()),
- true /* TODO(epertoso): similar to CallOptimization */);
+ isolate(), argc, call_data_object->IsUndefined(isolate()),
+ true /* FunctionTemplateInfo doesn't have an associated context. */);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), call_interface_descriptor,
- call_interface_descriptor.GetStackParameterCount() +
- static_cast<int>(stack_parameters->size()) + 1,
+ call_interface_descriptor.GetStackParameterCount() + argc +
+ 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
@@ -1529,42 +1690,62 @@ JSNativeContextSpecialization::InlineApiCall(
&function, ExternalReference::DIRECT_API_CALL, isolate())));
Node* code = jsgraph()->HeapConstant(stub.GetCode());
- ZoneVector<Node*> inputs(zone());
- inputs.push_back(code);
-
- // CallApiCallbackStub's register arguments.
- inputs.push_back(target);
- inputs.push_back(data);
- inputs.push_back(receiver);
- inputs.push_back(function_reference);
-
- // Stack parameters: CallApiCallbackStub expects the first one to be the
- // receiver.
- inputs.push_back(receiver);
- for (Node* node : *stack_parameters) {
- inputs.push_back(node);
+ // Add CallApiCallbackStub's register arguments as well.
+ Node* inputs[11] = {
+ code, target, data, receiver /* holder */, function_reference, receiver};
+ int index = 6 + argc;
+ inputs[index++] = context;
+ inputs[index++] = frame_state;
+ inputs[index++] = effect;
+ inputs[index++] = control;
+ // This needs to stay here because of the edge case described in
+ // http://crbug.com/675648.
+ if (value != nullptr) {
+ inputs[6] = value;
}
- inputs.push_back(context);
- inputs.push_back(frame_state);
- inputs.push_back(effect);
- inputs.push_back(control);
Node* effect0;
Node* value0 = effect0 =
- graph()->NewNode(common()->Call(call_descriptor),
- static_cast<int>(inputs.size()), inputs.data());
+ graph()->NewNode(common()->Call(call_descriptor), index, inputs);
Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
return ValueEffectControl(value0, effect0, control0);
}
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+ Node** effect,
+ Node* control) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSTypeOf: {
+ return receiver;
+ }
+ default: {
+ return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, *effect, control);
+ }
+ }
+}
+
Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& maps) {
+ std::vector<Handle<Map>> const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
Handle<Map> receiver_map(m.Value()->map(), isolate());
if (receiver_map->is_stable()) {
- for (Handle<Map> map : maps) {
+ for (Handle<Map> map : receiver_maps) {
if (map.is_identical_to(receiver_map)) {
dependencies()->AssumeMapStable(receiver_map);
return effect;
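A note on the new BuildCheckHeapObject helper introduced above: it is a strength reduction over the plain CheckHeapObject node. A sketch of its contract from the caller's side:

    // Sketch: opcodes that can only ever produce a heap object (HeapConstant,
    // the JSCreate* family, JSToObject, and so on) need no dynamic Smi check,
    // so the receiver comes back unchanged and the effect chain is untouched;
    // for anything else a CheckHeapObject node is appended to the chain.
    receiver = BuildCheckHeapObject(receiver, &effect, control);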
@@ -1572,17 +1753,16 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
}
}
}
- int const map_input_count = static_cast<int>(maps.size());
- int const input_count = 1 + map_input_count + 1 + 1;
- Node** inputs = zone()->NewArray<Node*>(input_count);
- inputs[0] = receiver;
- for (int i = 0; i < map_input_count; ++i) {
- inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+ ZoneHandleSet<Map> maps;
+ CheckMapsFlags flags = CheckMapsFlag::kNone;
+ for (Handle<Map> map : receiver_maps) {
+ maps.insert(map, graph()->zone());
+ if (map->is_migration_target()) {
+ flags |= CheckMapsFlag::kTryMigrateInstance;
+ }
}
- inputs[input_count - 2] = effect;
- inputs[input_count - 1] = control;
- return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
- inputs);
+ return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ effect, control);
}
void JSNativeContextSpecialization::AssumePrototypesStable(
@@ -1671,11 +1851,11 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
HeapObjectMatcher mtarget(m.InputAt(0));
HeapObjectMatcher mnewtarget(m.InputAt(1));
if (mtarget.HasValue() && mnewtarget.HasValue()) {
- Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(mtarget.Value());
- if (constructor->has_initial_map()) {
- Handle<Map> initial_map(constructor->initial_map(), isolate());
- if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(mnewtarget.Value());
+ if (original_constructor->has_initial_map()) {
+ Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mtarget.Value()) {
// Walk up the {effect} chain to see if the {receiver} is the
// dominating effect and there's no other observable write in
// between.
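One pattern recurs on nearly every line of this file's hunks and is worth spelling out: simplified operators that touch memory produce a value and extend the effect chain at the same time, hence the double assignment. A minimal sketch (names assumed, not a specific line from the patch):

    // Sketch of TurboFan's effect-threading idiom. LoadField yields both a
    // data value and a new effect; one NewNode call updates both locals:
    Node* value = effect = graph()->NewNode(
        simplified()->LoadField(access), object, effect, control);
    // Later effectful nodes take the updated {effect} as input, which is what
    // orders this load before them; {value} feeds pure data uses.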
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 2d07061d11..7fedf32e92 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -54,11 +54,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
private:
Reduction ReduceJSInstanceOf(Node* node);
+ Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandleList const& receiver_maps,
@@ -116,6 +118,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessStoreMode store_mode);
+ // Construct an appropriate heap object check.
+ Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+
// Construct an appropriate map check.
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
std::vector<Handle<Map>> const& maps);
@@ -146,7 +151,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
ValueEffectControl InlineApiCall(
Node* receiver, Node* context, Node* target, Node* frame_state,
- ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+ Node* parameter, Node* effect, Node* control,
Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info);
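The signature change above (ZoneVector<Node*>* stack_parameters becoming a single Node* parameter) encodes the fact that only accessors are inlined this way: getters pass no extra argument and setters pass exactly one. A sketch of the two call shapes as they appear in BuildPropertyAccess:

    // Getter: no stack parameter beyond the implicit receiver.
    InlineApiCall(receiver, context, target, frame_state0, nullptr, effect,
                  control, shared_info, function_template_info);
    // Setter: the value being stored is the single stack parameter.
    InlineApiCall(receiver, context, target, frame_state0, value, effect,
                  control, shared_info, function_template_info);
    // Inside, argc = (value == nullptr) ? 0 : 1 sizes CallApiCallbackStub.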
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index f64630c589..d9674c1bed 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -10,6 +10,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/type-feedback-vector.h"
namespace v8 {
@@ -20,7 +21,7 @@ VectorSlotPair::VectorSlotPair() {}
int VectorSlotPair::index() const {
- return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
+ return vector_.is_null() ? -1 : TypeFeedbackVector::GetIndex(slot_);
}
@@ -80,6 +81,30 @@ CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
return OpParameter<CallConstructParameters>(op);
}
+bool operator==(CallConstructWithSpreadParameters const& lhs,
+ CallConstructWithSpreadParameters const& rhs) {
+ return lhs.arity() == rhs.arity();
+}
+
+bool operator!=(CallConstructWithSpreadParameters const& lhs,
+ CallConstructWithSpreadParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CallConstructWithSpreadParameters const& p) {
+ return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CallConstructWithSpreadParameters const& p) {
+ return os << p.arity();
+}
+
+CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallConstructWithSpread, op->opcode());
+ return OpParameter<CallConstructWithSpreadParameters>(op);
+}
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
@@ -191,6 +216,60 @@ CreateCatchContextParameters const& CreateCatchContextParametersOf(
return OpParameter<CreateCatchContextParameters>(op);
}
+CreateFunctionContextParameters::CreateFunctionContextParameters(
+ int slot_count, ScopeType scope_type)
+ : slot_count_(slot_count), scope_type_(scope_type) {}
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs) {
+ return lhs.slot_count() == rhs.slot_count() &&
+ lhs.scope_type() == rhs.scope_type();
+}
+
+bool operator!=(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateFunctionContextParameters const& parameters) {
+ return base::hash_combine(parameters.slot_count(),
+ static_cast<int>(parameters.scope_type()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateFunctionContextParameters const& parameters) {
+ return os << parameters.slot_count() << ", " << parameters.scope_type();
+}
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, op->opcode());
+ return OpParameter<CreateFunctionContextParameters>(op);
+}
+
+bool operator==(DataPropertyParameters const& lhs,
+ DataPropertyParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(DataPropertyParameters const& lhs,
+ DataPropertyParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(DataPropertyParameters const& p) {
+ return base::hash_combine(p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, DataPropertyParameters const& p) {
+ return os;
+}
+
+DataPropertyParameters const& DataPropertyParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+ return OpParameter<DataPropertyParameters>(op);
+}
+
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
@@ -350,6 +429,7 @@ const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
+ lhs.feedback() == rhs.feedback() &&
lhs.shared_info().location() == rhs.shared_info().location();
}
@@ -361,7 +441,8 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
- return base::hash_combine(p.pretenure(), p.shared_info().location());
+ return base::hash_combine(p.pretenure(), p.shared_info().location(),
+ p.feedback());
}
@@ -436,26 +517,27 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
return OpParameter<CompareOperationHint>(op);
}
-#define CACHED_OP_LIST(V) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(LoadMessage, Operator::kNoThrow, 0, 1) \
- V(StoreMessage, Operator::kNoThrow, 1, 0) \
- V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0)
+#define CACHED_OP_LIST(V) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kPure, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+ V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
#define BINARY_OP_LIST(V) \
V(BitwiseOr) \
@@ -527,6 +609,9 @@ struct JSOperatorGlobalCache final {
Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
Name##Operator<CompareOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
+ Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<CompareOperationHint::kInternalizedString> \
+ k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
@@ -578,6 +663,10 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##NumberOperator; \
case CompareOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
+ case CompareOperationHint::kInternalizedString: \
+ return &cache_.k##Name##InternalizedStringOperator; \
+ case CompareOperationHint::kString: \
+ return &cache_.k##Name##StringOperator; \
case CompareOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -587,6 +676,17 @@ BINARY_OP_LIST(BINARY_OP)
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
+const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
+ const VectorSlotPair& feedback) {
+ DataPropertyParameters parameters(feedback);
+ return new (zone()) Operator1<DataPropertyParameters>( // --
+ IrOpcode::kJSStoreDataPropertyInLiteral,
+ Operator::kNoThrow, // opcode
+ "JSStoreDataPropertyInLiteral", // name
+ 4, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
return new (zone()) Operator1<ToBooleanHints>( //--
@@ -643,6 +743,14 @@ const Operator* JSOperatorBuilder::CallConstruct(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallConstructWithSpread(uint32_t arity) {
+ CallConstructWithSpreadParameters parameters(arity);
+ return new (zone()) Operator1<CallConstructWithSpreadParameters>( // --
+ IrOpcode::kJSCallConstructWithSpread, Operator::kNoProperties, // opcode
+ "JSCallConstructWithSpread", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
const Operator* JSOperatorBuilder::ConvertReceiver(
ConvertReceiverMode convert_mode) {
@@ -659,7 +767,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
- 2, 1, 1, 1, 1, 2, // counts
+ 1, 1, 1, 1, 1, 2, // counts
access); // parameter
}
@@ -669,7 +777,7 @@ const Operator* JSOperatorBuilder::LoadProperty(
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
- 3, 1, 1, 1, 1, 2, // counts
+ 2, 1, 1, 1, 1, 2, // counts
access); // parameter
}
@@ -696,7 +804,7 @@ const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
- 3, 1, 1, 0, 1, 2, // counts
+ 2, 1, 1, 0, 1, 2, // counts
access); // parameter
}
@@ -707,7 +815,7 @@ const Operator* JSOperatorBuilder::StoreProperty(
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
"JSStoreProperty", // name
- 4, 1, 1, 0, 1, 2, // counts
+ 3, 1, 1, 0, 1, 2, // counts
access); // parameter
}
@@ -728,7 +836,7 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
return new (zone()) Operator1<LoadGlobalParameters>( // --
IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
"JSLoadGlobal", // name
- 1, 1, 1, 1, 1, 2, // counts
+ 0, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
@@ -740,7 +848,7 @@ const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
return new (zone()) Operator1<StoreGlobalParameters>( // --
IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
"JSStoreGlobal", // name
- 2, 1, 1, 0, 1, 2, // counts
+ 1, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
@@ -752,7 +860,7 @@ const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
IrOpcode::kJSLoadContext, // opcode
Operator::kNoWrite | Operator::kNoThrow, // flags
"JSLoadContext", // name
- 1, 1, 0, 1, 1, 0, // counts
+ 0, 1, 0, 1, 1, 0, // counts
access); // parameter
}
@@ -763,7 +871,7 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
IrOpcode::kJSStoreContext, // opcode
Operator::kNoRead | Operator::kNoThrow, // flags
"JSStoreContext", // name
- 2, 1, 1, 0, 1, 0, // counts
+ 1, 1, 1, 0, 1, 0, // counts
access); // parameter
}
@@ -806,10 +914,10 @@ const Operator* JSOperatorBuilder::CreateArray(size_t arity,
parameters); // parameter
}
-
const Operator* JSOperatorBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
- CreateClosureParameters parameters(shared_info, pretenure);
+ Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
+ PretenureFlag pretenure) {
+ CreateClosureParameters parameters(shared_info, feedback, pretenure);
return new (zone()) Operator1<CreateClosureParameters>( // --
IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
"JSCreateClosure", // name
@@ -818,8 +926,8 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
- int number_of_elements) {
+ Handle<ConstantElementsPair> constant_elements, int literal_flags,
+ int literal_index, int number_of_elements) {
CreateLiteralParameters parameters(constant_elements, number_of_elements,
literal_flags, literal_index);
return new (zone()) Operator1<CreateLiteralParameters>( // --
@@ -853,13 +961,14 @@ const Operator* JSOperatorBuilder::CreateLiteralRegExp(
parameters); // parameter
}
-
-const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count,
+ ScopeType scope_type) {
+ CreateFunctionContextParameters parameters(slot_count, scope_type);
+ return new (zone()) Operator1<CreateFunctionContextParameters>( // --
IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties, // opcode
"JSCreateFunctionContext", // name
1, 1, 1, 1, 1, 2, // counts
- slot_count); // parameter
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateCatchContext(
@@ -882,22 +991,21 @@ const Operator* JSOperatorBuilder::CreateWithContext(
}
const Operator* JSOperatorBuilder::CreateBlockContext(
- const Handle<ScopeInfo>& scpope_info) {
+ const Handle<ScopeInfo>& scope_info) {
return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
1, 1, 1, 1, 1, 2, // counts
- scpope_info); // parameter
+ scope_info); // parameter
}
-
const Operator* JSOperatorBuilder::CreateScriptContext(
- const Handle<ScopeInfo>& scpope_info) {
+ const Handle<ScopeInfo>& scope_info) {
return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
"JSCreateScriptContext", // name
1, 1, 1, 1, 1, 2, // counts
- scpope_info); // parameter
+ scope_info); // parameter
}
} // namespace compiler
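For readers puzzling over the bare digit tuples in this file: judging by the call sites above, the six "counts" passed to Operator1 are the value/effect/control input counts followed by the value/effect/control output counts, which is why dropping one explicit value input from each operator shows up as the leading count decreasing by one across these hunks. An annotated sketch using the JSLoadNamed numbers:

    // Sketch: Operator1's count arguments, in order, are
    //   value_in, effect_in, control_in, value_out, effect_out, control_out.
    new (zone()) Operator1<NamedAccess>(
        IrOpcode::kJSLoadNamed, Operator::kNoProperties, "JSLoadNamed",
        1, 1, 1,  // inputs: 1 value (the receiver), 1 effect, 1 control
        1, 1, 2,  // outputs: 1 value, 1 effect, 2 control (success/exception)
        access);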
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 9cdd30594a..b9902931fc 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -80,6 +80,31 @@ std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
CallConstructParameters const& CallConstructParametersOf(Operator const*);
+// Defines the arity for a JavaScript constructor call with a spread as the last
+// parameter. This is used as a parameter by JSCallConstructWithSpread
+// operators.
+class CallConstructWithSpreadParameters final {
+ public:
+ explicit CallConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+ uint32_t arity() const { return arity_; }
+
+ private:
+ uint32_t const arity_;
+};
+
+bool operator==(CallConstructWithSpreadParameters const&,
+ CallConstructWithSpreadParameters const&);
+bool operator!=(CallConstructWithSpreadParameters const&,
+ CallConstructWithSpreadParameters const&);
+
+size_t hash_value(CallConstructWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&,
+ CallConstructWithSpreadParameters const&);
+
+CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ Operator const*);
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
@@ -216,6 +241,56 @@ std::ostream& operator<<(std::ostream& os,
CreateCatchContextParameters const& CreateCatchContextParametersOf(
Operator const*);
+// Defines the slot count and ScopeType for a new function or eval context. This
+// is used as a parameter by the JSCreateFunctionContext operator.
+class CreateFunctionContextParameters final {
+ public:
+ CreateFunctionContextParameters(int slot_count, ScopeType scope_type);
+
+ int slot_count() const { return slot_count_; }
+ ScopeType scope_type() const { return scope_type_; }
+
+ private:
+ int const slot_count_;
+ ScopeType const scope_type_;
+};
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
+bool operator!=(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
+
+size_t hash_value(CreateFunctionContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+ CreateFunctionContextParameters const& parameters);
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+ Operator const*);
+
+// Defines the feedback, i.e., vector and index, for storing a data property
+// in an object literal. This is used as a parameter by the
+// JSStoreDataPropertyInLiteral operator.
+class DataPropertyParameters final {
+ public:
+ explicit DataPropertyParameters(VectorSlotPair const& feedback)
+ : feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(DataPropertyParameters const&, DataPropertyParameters const&);
+bool operator!=(DataPropertyParameters const&, DataPropertyParameters const&);
+
+size_t hash_value(DataPropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, DataPropertyParameters const&);
+
+const DataPropertyParameters& DataPropertyParametersOf(const Operator* op);
+
// Defines the property of an object for a named access. This is
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
class NamedAccess final {
@@ -361,14 +436,17 @@ const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
class CreateClosureParameters final {
public:
CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
+ VectorSlotPair const& feedback,
PretenureFlag pretenure)
- : shared_info_(shared_info), pretenure_(pretenure) {}
+ : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
PretenureFlag pretenure() const { return pretenure_; }
private:
const Handle<SharedFunctionInfo> shared_info_;
+ VectorSlotPair const feedback_;
const PretenureFlag pretenure_;
};
@@ -456,10 +534,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ VectorSlotPair const& feedback,
PretenureFlag pretenure);
const Operator* CreateIterResultObject();
const Operator* CreateKeyValueArray();
- const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+ const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
int literal_flags, int literal_index,
int number_of_elements);
const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
@@ -478,6 +557,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
const Operator* CallConstruct(uint32_t arity, float frequency,
VectorSlotPair const& feedback);
+ const Operator* CallConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -489,10 +569,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
VectorSlotPair const& feedback);
+ const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
+
const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
+ const Operator* GetSuperConstructor();
+
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -525,7 +609,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StackCheck();
- const Operator* CreateFunctionContext(int slot_count);
+ const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
const Operator* CreateCatchContext(const Handle<String>& name,
const Handle<ScopeInfo>& scope_info);
const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
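Each parameter class declared in this header comes with a matching *Of accessor. A hedged sketch of how a consumer reads the new CreateFunctionContextParameters back off an operator (the inspecting function itself is hypothetical):

    // Sketch only: retrieving the parameter from a node's operator.
    void InspectFunctionContext(Node* node) {
      DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
      CreateFunctionContextParameters const& p =
          CreateFunctionContextParametersOf(node->op());
      int slots = p.slot_count();        // context slots to allocate
      ScopeType type = p.scope_type();   // distinguishes function vs eval
      USE(slots);
      USE(type);
    }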
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index dbbeca6e96..54c8713578 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -69,12 +69,24 @@ class JSBinopReduction final {
return true;
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kInternalizedString:
break;
}
}
return false;
}
+ bool IsInternalizedStringCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kInternalizedString) &&
+ BothInputsMaybe(Type::InternalizedString());
+ }
+ return false;
+ }
+
// Check if a string addition will definitely result in creating a ConsString,
// i.e. if the combined length of the resulting string exceeds the ConsString
// minimum length.
@@ -103,6 +115,25 @@ class JSBinopReduction final {
return false;
}
+ // Checks that both inputs are InternalizedString; for any input that is
+ // not statically known to be one, insert a CheckInternalizedString node.
+ void CheckInputsToInternalizedString() {
+ if (!left_type()->Is(Type::UniqueName())) {
+ Node* left_input = graph()->NewNode(
+ simplified()->CheckInternalizedString(), left(), effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+ if (!right_type()->Is(Type::UniqueName())) {
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckInternalizedString(), right(),
+ effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
void ConvertInputsToNumber() {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
@@ -316,6 +347,10 @@ class JSBinopReduction final {
bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
+ bool BothInputsMaybe(Type* t) {
+ return left_type()->Maybe(t) && right_type()->Maybe(t);
+ }
+
bool OneInputCannotBe(Type* t) {
return !left_type()->Maybe(t) || !right_type()->Maybe(t);
}
@@ -459,8 +494,6 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- the_hole_type_(
- Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
@@ -850,6 +883,13 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::UniqueName())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ }
+ if (r.IsInternalizedStringCompareOperation()) {
+ r.CheckInputsToInternalizedString();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ }
if (r.BothInputsAre(Type::String())) {
return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
@@ -912,25 +952,14 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
if (reduction.Changed()) return reduction;
- if (r.OneInputIs(the_hole_type_)) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Undefined())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Null())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Boolean())) {
+ if (r.BothInputsAre(Type::Unique())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
- if (r.OneInputIs(Type::Object())) {
+ if (r.OneInputIs(Type::NonStringUniqueOrHole())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
- if (r.OneInputIs(Type::Receiver())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.BothInputsAre(Type::Unique())) {
+ if (r.IsInternalizedStringCompareOperation()) {
+ r.CheckInputsToInternalizedString();
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
@@ -958,7 +987,6 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
- RelaxEffectsAndControls(node);
node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
jsgraph()->ZeroConstant()));
node->TrimInputCount(1);
@@ -966,10 +994,25 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Changed(node);
} else if (input_type->Is(Type::Number())) {
// JSToBoolean(x:number) => NumberToBoolean(x)
- RelaxEffectsAndControls(node);
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
return Changed(node);
+ } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
+ // JSToBoolean(x:detectable receiver \/ null)
+ // => BooleanNot(ReferenceEqual(x,#null))
+ node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
+ input, jsgraph()->NullConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
+ // JSToBoolean(x:receiver \/ null \/ undefined)
+ // => BooleanNot(ObjectIsUndetectable(x))
+ node->ReplaceInput(
+ 0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
}
return NoChange();
}
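The two new JSToBoolean arms above lean on a V8 subtlety: the maps of null and undefined (like those of document.all-style objects) carry the "undetectable" bit, so ObjectIsUndetectable folds all the falsy members of the receiver-or-null-or-undefined type into one predicate. A sketch of that second lowering with explanatory comments:

    // Sketch: JSToBoolean(x : receiver \/ null \/ undefined)
    //   => BooleanNot(ObjectIsUndetectable(x))
    // ObjectIsUndetectable(x) is true exactly for null, undefined, and
    // undetectable receivers, i.e. the inputs that convert to false.
    node->ReplaceInput(
        0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
    node->TrimInputCount(1);
    NodeProperties::ChangeOp(node, simplified()->BooleanNot());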
@@ -1239,6 +1282,9 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 2);
Type* key_type = NodeProperties::GetType(key);
Type* value_type = NodeProperties::GetType(value);
+
+ if (!value_type->Is(Type::PlainPrimitive())) return NoChange();
+
HeapObjectMatcher mbase(base);
if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
@@ -1257,7 +1303,6 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Convert to a number first.
@@ -1266,12 +1311,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
- Node* frame_state_for_to_number =
- NodeProperties::FindFrameStateBefore(node);
- value = effect =
- graph()->NewNode(javascript()->ToNumber(), value, context,
- frame_state_for_to_number, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), value);
+ value =
+ graph()->NewNode(simplified()->PlainPrimitiveToNumber(), value);
}
}
// Check if we can avoid the bounds check.
@@ -1316,11 +1357,30 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Node* constructor = NodeProperties::GetValueInput(node, 0);
Type* constructor_type = NodeProperties::GetType(constructor);
Node* object = NodeProperties::GetValueInput(node, 1);
+ Type* object_type = NodeProperties::GetType(object);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Check if the {constructor} cannot be callable.
+ // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
+ if (!constructor_type->Maybe(Type::Callable())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ // If the {constructor} cannot be a JSBoundFunction and the {object}
+ // cannot be a JSReceiver, then this can be constant-folded to false.
+ // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 2 and 3.
+ if (!object_type->Maybe(Type::Receiver()) &&
+ !constructor_type->Maybe(Type::BoundFunction())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
// Check if the {constructor} is a (known) JSFunction.
if (!constructor_type->IsHeapConstant() ||
!constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -1473,16 +1533,17 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- Node* previous = effect = graph()->NewNode(
+ context = effect = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control);
- node->ReplaceInput(0, previous);
+ context, effect, control);
}
+ node->ReplaceInput(0, context);
node->ReplaceInput(1, effect);
- node->ReplaceInput(2, control);
+ node->AppendInput(jsgraph()->zone(), control);
NodeProperties::ChangeOp(
node,
simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
@@ -1493,15 +1554,17 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
Node* control = graph()->start();
+ Node* value = NodeProperties::GetValueInput(node, 0);
for (size_t i = 0; i < access.depth(); ++i) {
- Node* previous = effect = graph()->NewNode(
+ context = effect = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control);
- node->ReplaceInput(0, previous);
+ context, effect, control);
}
- node->RemoveInput(2);
+ node->ReplaceInput(0, context);
+ node->ReplaceInput(1, value);
node->ReplaceInput(2, effect);
NodeProperties::ChangeOp(
node,
@@ -1614,10 +1677,10 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
} else {
Node* native_context = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
+ context, effect);
receiver = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, effect);
+ native_context, effect);
}
ReplaceWithValue(node, receiver, effect, control);
return Replace(receiver);
@@ -1719,10 +1782,10 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
} else {
Node* native_context = eglobal = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, eglobal);
+ context, eglobal);
rglobal = eglobal = graph()->NewNode(
javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, eglobal);
+ native_context, eglobal);
}
}
@@ -2031,6 +2094,15 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // We know that the {index} is in Unsigned32 range here, otherwise executing
+ // the JSForInNext wouldn't be valid. Unfortunately due to OSR and generators
+ // this is not always reflected in the types, hence we might need to rename
+ // the {index} here.
+ if (!NodeProperties::GetType(index)->Is(Type::Unsigned32())) {
+ index = graph()->NewNode(common()->TypeGuard(Type::Unsigned32()), index,
+ control);
+ }
+
// Load the next {key} from the {cache_array}.
Node* key = effect = graph()->NewNode(
simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
@@ -2085,6 +2157,28 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
+ ExternalReference const ref =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+ NodeProperties::ChangeOp(
+ node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+ return Changed(node);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode());
+ ExternalReference const ref =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+ node->ReplaceInput(1, value);
+ NodeProperties::ChangeOp(
+ node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2095,7 +2189,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
int register_count = OpParameter<int>(node);
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
@@ -2149,7 +2243,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
int index = OpParameter<int>(node);
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
@@ -2235,6 +2329,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCallFunction(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
+ case IrOpcode::kJSLoadMessage:
+ return ReduceJSLoadMessage(node);
+ case IrOpcode::kJSStoreMessage:
+ return ReduceJSStoreMessage(node);
case IrOpcode::kJSGeneratorStore:
return ReduceJSGeneratorStore(node);
case IrOpcode::kJSGeneratorRestoreContinuation:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 3e710226b4..20f35f1fe1 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -73,6 +73,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInNext(Node* node);
+ Reduction ReduceJSLoadMessage(Node* node);
+ Reduction ReduceJSStoreMessage(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
@@ -96,7 +98,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Flags flags_;
JSGraph* jsgraph_;
Type* shifted_int32_ranges_[4];
- Type* const the_hole_type_;
TypeCache const& type_cache_;
};
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 971ea7212d..2458f65867 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -5,7 +5,6 @@
#include "src/compiler/linkage.h"
#include "src/ast/scopes.h"
-#include "src/builtins/builtins-utils.h"
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
@@ -152,7 +151,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kGeneratorGetContinuation:
- case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
@@ -179,7 +177,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineGeneratorClose:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
- case Runtime::kInlineGetSuperConstructor:
case Runtime::kInlineIsArray:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index e50ebe1919..6c2935f7ca 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -8,6 +8,8 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -320,6 +322,42 @@ void LoadElimination::AbstractField::Print() const {
}
}
+bool LoadElimination::AbstractMaps::Lookup(
+ Node* object, ZoneHandleSet<Map>* object_maps) const {
+ for (auto pair : info_for_node_) {
+ if (MustAlias(object, pair.first)) {
+ *object_maps = pair.second;
+ return true;
+ }
+ }
+ return false;
+}
+
+LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
+ Node* object, Zone* zone) const {
+ for (auto pair : this->info_for_node_) {
+ if (MayAlias(object, pair.first)) {
+ AbstractMaps* that = new (zone) AbstractMaps(zone);
+ for (auto pair : this->info_for_node_) {
+ if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+ }
+ return that;
+ }
+ }
+ return this;
+}
+
+void LoadElimination::AbstractMaps::Print() const {
+ for (auto pair : info_for_node_) {
+ PrintF(" #%d:%s\n", pair.first->id(), pair.first->op()->mnemonic());
+ OFStream os(stdout);
+ ZoneHandleSet<Map> const& maps = pair.second;
+ for (size_t i = 0; i < maps.size(); ++i) {
+ os << " - " << Brief(*maps[i]) << "\n";
+ }
+ }
+}
+
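// A minimal standalone model of the AbstractMaps table added above (plain
// C++: std::map/std::set stand in for ZoneMap and ZoneHandleSet, and the
// alias predicate is a caller-supplied stub, whereas V8 derives MayAlias
// from allocation information). Lookup uses must-alias, i.e. node identity;
// Kill conservatively drops every entry that may alias the clobbered object.
#include <functional>
#include <iterator>
#include <map>
#include <set>

using NodeId = int;
using MapSet = std::set<int>;

struct AbstractMapsModel {
  std::map<NodeId, MapSet> info_for_node;
  std::function<bool(NodeId, NodeId)> may_alias =
      [](NodeId, NodeId) { return true; };  // safe default: assume aliasing

  bool Lookup(NodeId object, MapSet* object_maps) const {
    auto it = info_for_node.find(object);  // MustAlias == identity here
    if (it == info_for_node.end()) return false;
    *object_maps = it->second;
    return true;
  }

  void Kill(NodeId object) {
    for (auto it = info_for_node.begin(); it != info_for_node.end();) {
      it = may_alias(object, it->first) ? info_for_node.erase(it)
                                        : std::next(it);
    }
  }
};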
bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
if (this->checks_) {
if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
@@ -344,6 +382,13 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
return false;
}
}
+ if (this->maps_) {
+ if (!that->maps_ || !that->maps_->Equals(this->maps_)) {
+ return false;
+ }
+ } else if (that->maps_) {
+ return false;
+ }
return true;
}
@@ -372,6 +417,11 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
}
}
+
+ // Merge the information we have about the maps.
+ if (this->maps_) {
+ this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
+ }
}
Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -389,6 +439,35 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::AddCheck(
return that;
}
+bool LoadElimination::AbstractState::LookupMaps(
+ Node* object, ZoneHandleSet<Map>* object_map) const {
+ return this->maps_ && this->maps_->Lookup(object, object_map);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
+ Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->maps_) {
+ that->maps_ = that->maps_->Extend(object, maps, zone);
+ } else {
+ that->maps_ = new (zone) AbstractMaps(object, maps, zone);
+ }
+ return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
+ Node* object, Zone* zone) const {
+ if (this->maps_) {
+ AbstractMaps const* that_maps = this->maps_->Kill(object, zone);
+ if (this->maps_ != that_maps) {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->maps_ = that_maps;
+ return that;
+ }
+ }
+ return this;
+}
+
Node* LoadElimination::AbstractState::LookupElement(Node* object,
Node* index) const {
if (this->elements_) {
@@ -456,7 +535,7 @@ LoadElimination::AbstractState::KillFields(Node* object, Zone* zone) const {
AbstractField const* that_field = this_field->Kill(object, zone);
if (that_field != this_field) {
AbstractState* that = new (zone) AbstractState(*this);
- that->fields_[i] = this_field;
+ that->fields_[i] = that_field;
while (++i < arraysize(fields_)) {
if (this->fields_[i] != nullptr) {
that->fields_[i] = this->fields_[i]->Kill(object, zone);
@@ -481,6 +560,10 @@ void LoadElimination::AbstractState::Print() const {
PrintF(" checks:\n");
checks_->Print();
}
+ if (maps_) {
+ PrintF(" maps:\n");
+ maps_->Print();
+ }
if (elements_) {
PrintF(" elements:\n");
elements_->Print();
@@ -520,23 +603,18 @@ Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
}
Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+ ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int const map_input_count = node->op()->ValueInputCount() - 1;
- if (Node* const object_map =
- state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
- for (int i = 0; i < map_input_count; ++i) {
- Node* map = NodeProperties::GetValueInput(node, 1 + i);
- if (map == object_map) return Replace(effect);
- }
- }
- if (map_input_count == 1) {
- Node* const map0 = NodeProperties::GetValueInput(node, 1);
- state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
- zone());
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ if (maps.contains(object_maps)) return Replace(effect);
+ state = state->KillMaps(object, zone());
+ // TODO(turbofan): Compute the intersection.
}
+ state = state->AddMaps(object, maps, zone());
return UpdateState(node, state);
}
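// The redundancy test above is plain set inclusion: if every map the state
// records for {object} is among the maps this CheckMaps verifies, the check
// cannot fail and collapses to its effect input. A standalone sketch, with
// std::set standing in for ZoneHandleSet<Map>:
#include <algorithm>
#include <set>

bool CheckMapsIsRedundant(const std::set<int>& checked_maps,
                          const std::set<int>& known_maps) {
  // Mirrors maps.contains(object_maps) in the reducer above; both ranges
  // iterate in sorted order, as std::includes requires.
  return std::includes(checked_maps.begin(), checked_maps.end(),
                       known_maps.begin(), known_maps.end());
}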
@@ -546,18 +624,16 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- if (Node* const elements_map =
- state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
// Check if the {elements} already have the fixed array map.
- if (elements_map == fixed_array_map) {
- ReplaceWithValue(node, elements, effect);
- return Replace(elements);
- }
+ ZoneHandleSet<Map> elements_maps;
+ ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+ if (state->LookupMaps(elements, &elements_maps) &&
+ fixed_array_maps.contains(elements_maps)) {
+ ReplaceWithValue(node, elements, effect);
+ return Replace(elements);
}
// We know that the resulting elements have the fixed array map.
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_array_map, zone());
+ state = state->AddMaps(node, fixed_array_maps, zone());
// Kill the previous elements on {object}.
state =
state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
@@ -575,14 +651,12 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
if (state == nullptr) return NoChange();
if (flags & GrowFastElementsFlag::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
- Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_double_array_map, zone());
+ state = state->AddMaps(
+ node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
} else {
// We know that the resulting elements have the fixed array map.
- Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_array_map, zone());
+ state = state->AddMaps(
+ node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
}
if (flags & GrowFastElementsFlag::kArrayObject) {
// Kill the previous Array::length on {object}.
@@ -599,31 +673,30 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
}
Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+ ElementsTransition transition = ElementsTransitionOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
- Node* const source_map = NodeProperties::GetValueInput(node, 1);
- Node* const target_map = NodeProperties::GetValueInput(node, 2);
+ Handle<Map> source_map(transition.source());
+ Handle<Map> target_map(transition.target());
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (Node* const object_map =
- state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
- if (target_map == object_map) {
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ if (ZoneHandleSet<Map>(target_map).contains(object_maps)) {
// The {object} already has the {target_map}, so this TransitionElements
// {node} is fully redundant (independent of what {source_map} is).
return Replace(effect);
}
- state =
- state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
- if (source_map == object_map) {
- state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
- target_map, zone());
+ if (object_maps.contains(ZoneHandleSet<Map>(source_map))) {
+ object_maps.remove(source_map, zone());
+ object_maps.insert(target_map, zone());
+ state = state->KillMaps(object, zone());
+ state = state->AddMaps(object, object_maps, zone());
}
} else {
- state =
- state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
+ state = state->KillMaps(object, zone());
}
- ElementsTransition transition = ElementsTransitionOf(node->op());
- switch (transition) {
+ switch (transition.mode()) {
case ElementsTransition::kFastTransition:
break;
case ElementsTransition::kSlowTransition:
@@ -642,23 +715,40 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int field_index = FieldIndexOf(access);
- if (field_index >= 0) {
- if (Node* replacement = state->LookupField(object, field_index)) {
- // Make sure we don't resurrect dead {replacement} nodes.
- if (!replacement->IsDead()) {
- // We might need to guard the {replacement} if the type of the
- // {node} is more precise than the type of the {replacement}.
- Type* const node_type = NodeProperties::GetType(node);
- if (!NodeProperties::GetType(replacement)->Is(node_type)) {
- replacement = graph()->NewNode(common()->TypeGuard(node_type),
- replacement, control);
+ if (access.offset == HeapObject::kMapOffset &&
+ access.base_is_tagged == kTaggedBase) {
+ DCHECK(IsAnyTagged(access.machine_type.representation()));
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps) && object_maps.size() == 1) {
+ Node* value = jsgraph()->HeapConstant(object_maps[0]);
+ NodeProperties::SetType(value, Type::OtherInternal());
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ } else {
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ if (Node* replacement = state->LookupField(object, field_index)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ // We might need to guard the {replacement} if the type of the
+ // {node} is more precise than the type of the {replacement}.
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+ replacement = graph()->NewNode(common()->TypeGuard(node_type),
+ replacement, control);
+ NodeProperties::SetType(replacement, node_type);
+ }
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
}
- ReplaceWithValue(node, replacement, effect);
- return Replace(replacement);
}
+ state = state->AddField(object, field_index, node, zone());
}
- state = state->AddField(object, field_index, node, zone());
+ }
+ Handle<Map> field_map;
+ if (access.map.ToHandle(&field_map)) {
+ state = state->AddMaps(node, ZoneHandleSet<Map>(field_map), zone());
}
return UpdateState(node, state);
}
@@ -670,19 +760,33 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int field_index = FieldIndexOf(access);
- if (field_index >= 0) {
- Node* const old_value = state->LookupField(object, field_index);
- if (old_value == new_value) {
- // This store is fully redundant.
- return Replace(effect);
+ if (access.offset == HeapObject::kMapOffset &&
+ access.base_is_tagged == kTaggedBase) {
+ DCHECK(IsAnyTagged(access.machine_type.representation()));
+ // Kill all potential knowledge about the {object}'s map.
+ state = state->KillMaps(object, zone());
+ Type* const new_value_type = NodeProperties::GetType(new_value);
+ if (new_value_type->IsHeapConstant()) {
+ // Record the new {object} map information.
+ ZoneHandleSet<Map> object_maps(
+ Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+ state = state->AddMaps(object, object_maps, zone());
}
- // Kill all potentially aliasing fields and record the new value.
- state = state->KillField(object, field_index, zone());
- state = state->AddField(object, field_index, new_value, zone());
} else {
- // Unsupported StoreField operator.
- state = state->KillFields(object, zone());
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ Node* const old_value = state->LookupField(object, field_index);
+ if (old_value == new_value) {
+ // This store is fully redundant.
+ return Replace(effect);
+ }
+ // Kill all potentially aliasing fields and record the new value.
+ state = state->KillField(object, field_index, zone());
+ state = state->AddField(object, field_index, new_value, zone());
+ } else {
+ // Unsupported StoreField operator.
+ state = state->KillFields(object, zone());
+ }
}
return UpdateState(node, state);
}
@@ -703,6 +807,7 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
if (!NodeProperties::GetType(replacement)->Is(node_type)) {
replacement = graph()->NewNode(common()->TypeGuard(node_type),
replacement, control);
+ NodeProperties::SetType(replacement, node_type);
}
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
@@ -865,21 +970,31 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
break;
}
case IrOpcode::kTransitionElementsKind: {
+ ElementsTransition transition = ElementsTransitionOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
- state = state->KillField(
- object, FieldIndexOf(HeapObject::kMapOffset), zone());
- state = state->KillField(
- object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ ZoneHandleSet<Map> object_maps;
+ if (!state->LookupMaps(object, &object_maps) ||
+ !ZoneHandleSet<Map>(transition.target())
+ .contains(object_maps)) {
+ state = state->KillMaps(object, zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ }
break;
}
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
- int field_index = FieldIndexOf(access);
- if (field_index < 0) {
- state = state->KillFields(object, zone());
+ if (access.offset == HeapObject::kMapOffset) {
+ // Invalidate what we know about the {object}'s map.
+ state = state->KillMaps(object, zone());
} else {
- state = state->KillField(object, field_index, zone());
+ int field_index = FieldIndexOf(access);
+ if (field_index < 0) {
+ state = state->KillFields(object, zone());
+ } else {
+ state = state->KillField(object, field_index, zone());
+ }
}
break;
}
@@ -911,7 +1026,8 @@ int LoadElimination::FieldIndexOf(int offset) {
DCHECK_EQ(0, offset % kPointerSize);
int field_index = offset / kPointerSize;
if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
- return field_index;
+ DCHECK_LT(0, field_index);
+ return field_index - 1;
}
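// Worked example of the index shift above: the map word at offset 0 is now
// tracked by AbstractMaps instead of the fields_ table, so offset 0 never
// reaches FieldIndexOf and slot 0 can hold the field at offset kPointerSize.
// A sketch assuming an 8-byte pointer size:
#include <cassert>

int FieldIndexOfSketch(int offset) {
  const int kPointerSize = 8;        // assumption for this example
  const int kMaxTrackedFields = 32;
  assert(offset % kPointerSize == 0);
  int field_index = offset / kPointerSize;
  if (field_index >= kMaxTrackedFields) return -1;
  assert(field_index > 0);           // offset 0 is the map, handled elsewhere
  return field_index - 1;            // offset 8 -> slot 0, offset 16 -> slot 1
}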
// static
@@ -957,6 +1073,8 @@ CommonOperatorBuilder* LoadElimination::common() const {
Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
+Factory* LoadElimination::factory() const { return jsgraph()->factory(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 50979e4da8..cd486a2cd7 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -8,9 +8,14 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class Factory;
+
namespace compiler {
// Forward declarations.
@@ -152,6 +157,49 @@ class V8_EXPORT_PRIVATE LoadElimination final
static size_t const kMaxTrackedFields = 32;
+ // Abstract state to approximate the current map of an object along the
+ // effect paths through the graph.
+ class AbstractMaps final : public ZoneObject {
+ public:
+ explicit AbstractMaps(Zone* zone) : info_for_node_(zone) {}
+ AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone)
+ : info_for_node_(zone) {
+ info_for_node_.insert(std::make_pair(object, maps));
+ }
+
+ AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
+ Zone* zone) const {
+ AbstractMaps* that = new (zone) AbstractMaps(zone);
+ that->info_for_node_ = this->info_for_node_;
+ that->info_for_node_.insert(std::make_pair(object, maps));
+ return that;
+ }
+ bool Lookup(Node* object, ZoneHandleSet<Map>* object_maps) const;
+ AbstractMaps const* Kill(Node* object, Zone* zone) const;
+ bool Equals(AbstractMaps const* that) const {
+ return this == that || this->info_for_node_ == that->info_for_node_;
+ }
+ AbstractMaps const* Merge(AbstractMaps const* that, Zone* zone) const {
+ if (this->Equals(that)) return this;
+ AbstractMaps* copy = new (zone) AbstractMaps(zone);
+ for (auto this_it : this->info_for_node_) {
+ Node* this_object = this_it.first;
+ ZoneHandleSet<Map> this_maps = this_it.second;
+ auto that_it = that->info_for_node_.find(this_object);
+ if (that_it != that->info_for_node_.end() &&
+ that_it->second == this_maps) {
+ copy->info_for_node_.insert(this_it);
+ }
+ }
+ return copy;
+ }
+
+ void Print() const;
+
+ private:
+ ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_;
+ };
+
class AbstractState final : public ZoneObject {
public:
AbstractState() {
@@ -163,6 +211,11 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
+ AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
+ Zone* zone) const;
+ AbstractState const* KillMaps(Node* object, Zone* zone) const;
+ bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
+
AbstractState const* AddField(Node* object, size_t index, Node* value,
Zone* zone) const;
AbstractState const* KillField(Node* object, size_t index,
@@ -185,6 +238,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractChecks const* checks_ = nullptr;
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
+ AbstractMaps const* maps_ = nullptr;
};
class AbstractStateForEffectNodes final : public ZoneObject {
@@ -223,6 +277,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
CommonOperatorBuilder* common() const;
AbstractState const* empty_state() const { return &empty_state_; }
+ Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Zone* zone() const { return node_states_.zone(); }
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index a8f7a25e1f..ecabbe0575 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -30,6 +30,10 @@ class MachineRepresentationInferrer {
Run();
}
+ CallDescriptor* call_descriptor() const {
+ return linkage_->GetIncomingDescriptor();
+ }
+
MachineRepresentation GetRepresentation(Node const* node) const {
return representation_vector_.at(node->id());
}
@@ -66,6 +70,18 @@ class MachineRepresentationInferrer {
}
}
+ MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return MachineRepresentation::kWord32;
+ default:
+ break;
+ }
+ return rep;
+ }
+
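// The promotion above encodes that sub-word values still occupy full 32-bit
// machine words, so loads, stores and returns of kWord8/kWord16 are checked
// as kWord32. A standalone sketch of the mapping:
enum class Rep { kWord8, kWord16, kWord32, kWord64, kTagged };

Rep Promote(Rep rep) {
  switch (rep) {
    case Rep::kWord8:
    case Rep::kWord16:
      return Rep::kWord32;  // narrow integers live in 32-bit registers
    default:
      return rep;           // kWord32 and wider are already canonical
  }
}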
void Run() {
auto blocks = schedule_->all_blocks();
for (BasicBlock* block : *blocks) {
@@ -82,6 +98,11 @@ class MachineRepresentationInferrer {
linkage_->GetParameterType(ParameterIndexOf(node->op()))
.representation();
break;
+ case IrOpcode::kReturn: {
+ representation_vector_[node->id()] = PromoteRepresentation(
+ linkage_->GetReturnType().representation());
+ break;
+ }
case IrOpcode::kProjection: {
representation_vector_[node->id()] = GetProjectionType(node);
} break;
@@ -91,12 +112,12 @@ class MachineRepresentationInferrer {
case IrOpcode::kAtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
- representation_vector_[node->id()] =
- LoadRepresentationOf(node->op()).representation();
+ representation_vector_[node->id()] = PromoteRepresentation(
+ LoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kCheckedLoad:
- representation_vector_[node->id()] =
- CheckedLoadRepresentationOf(node->op()).representation();
+ representation_vector_[node->id()] = PromoteRepresentation(
+ CheckedLoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
@@ -104,6 +125,10 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
+ case IrOpcode::kUnalignedLoad:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ UnalignedLoadRepresentationOf(node->op()).representation());
+ break;
case IrOpcode::kPhi:
representation_vector_[node->id()] =
PhiRepresentationOf(node->op());
@@ -119,9 +144,22 @@ class MachineRepresentationInferrer {
}
break;
}
- case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kAtomicStore:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ break;
+ case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ StoreRepresentationOf(node->op()).representation());
+ break;
+ case IrOpcode::kCheckedStore:
representation_vector_[node->id()] =
- UnalignedLoadRepresentationOf(node->op()).representation();
+ PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
+ break;
+ case IrOpcode::kUnalignedStore:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ UnalignedStoreRepresentationOf(node->op()));
break;
case IrOpcode::kHeapConstant:
case IrOpcode::kNumberConstant:
@@ -237,8 +275,12 @@ class MachineRepresentationChecker {
public:
MachineRepresentationChecker(
Schedule const* const schedule,
- MachineRepresentationInferrer const* const inferrer)
- : schedule_(schedule), inferrer_(inferrer) {}
+ MachineRepresentationInferrer const* const inferrer, bool is_stub,
+ const char* name)
+ : schedule_(schedule),
+ inferrer_(inferrer),
+ is_stub_(is_stub),
+ name_(name) {}
void Run() {
BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -290,9 +332,17 @@ class MachineRepresentationChecker {
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
- CheckValueInputIsTaggedOrPointer(node, 0);
- CheckValueInputRepresentationIs(
- node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ if (Is64()) {
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputIsTaggedOrPointer(node, 1);
+ if (!is_stub_) {
+ CheckValueInputRepresentationIs(
+ node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ }
+ } else {
+ CheckValueInputForInt64Op(node, 0);
+ CheckValueInputForInt64Op(node, 1);
+ }
break;
case IrOpcode::kInt64LessThan:
case IrOpcode::kInt64LessThanOrEqual:
@@ -317,6 +367,19 @@ class MachineRepresentationChecker {
MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
break;
case IrOpcode::kWord32Equal:
+ if (Is32()) {
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputIsTaggedOrPointer(node, 1);
+ if (!is_stub_) {
+ CheckValueInputRepresentationIs(
+ node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ }
+ } else {
+ CheckValueInputForInt32Op(node, 0);
+ CheckValueInputForInt32Op(node, 1);
+ }
+ break;
+
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32LessThan:
@@ -374,7 +437,7 @@ class MachineRepresentationChecker {
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
- switch (StoreRepresentationOf(node->op()).representation()) {
+ switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -382,15 +445,14 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
- node, 2,
- StoreRepresentationOf(node->op()).representation());
+ node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kAtomicStore:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
- switch (AtomicStoreRepresentationOf(node->op())) {
+ switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -398,7 +460,7 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
- node, 2, AtomicStoreRepresentationOf(node->op()));
+ node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kPhi:
@@ -410,6 +472,11 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, i);
}
break;
+ case MachineRepresentation::kWord32:
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputForInt32Op(node, i);
+ }
+ break;
default:
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
CheckValueInputRepresentationIs(
@@ -422,10 +489,33 @@ class MachineRepresentationChecker {
case IrOpcode::kSwitch:
CheckValueInputForInt32Op(node, 0);
break;
- case IrOpcode::kReturn:
- // TODO(epertoso): use the linkage to determine which tipe we
- // should have here.
+ case IrOpcode::kReturn: {
+ // TODO(ishell): enable once the pop count parameter type becomes
+ // MachineType::PointerRepresentation(). Currently it's int32 or
+ // word-size.
+ // CheckValueInputRepresentationIs(
+ // node, 0, MachineType::PointerRepresentation()); // Pop count
+ size_t return_count = inferrer_->call_descriptor()->ReturnCount();
+ for (size_t i = 0; i < return_count; i++) {
+ MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
+ int input_index = static_cast<int>(i + 1);
+ switch (type.representation()) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ CheckValueInputIsTagged(node, input_index);
+ break;
+ case MachineRepresentation::kWord32:
+ CheckValueInputForInt32Op(node, input_index);
+ break;
+ default:
+               CheckValueInputRepresentationIs(node, input_index,
+                                               type.representation());
+           }
+         }
break;
+ }
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
break;
@@ -434,6 +524,7 @@ class MachineRepresentationChecker {
std::stringstream str;
str << "Node #" << node->id() << ":" << *node->op()
<< " in the machine graph is not being checked.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
break;
@@ -443,6 +534,15 @@ class MachineRepresentationChecker {
}
private:
+ static bool Is32() {
+ return MachineType::PointerRepresentation() ==
+ MachineRepresentation::kWord32;
+ }
+ static bool Is64() {
+ return MachineType::PointerRepresentation() ==
+ MachineRepresentation::kWord64;
+ }
+
void CheckValueInputRepresentationIs(Node const* node, int index,
MachineRepresentation representation) {
Node const* input = node->InputAt(index);
@@ -450,10 +550,11 @@ class MachineRepresentationChecker {
inferrer_->GetRepresentation(input);
if (input_representation != representation) {
std::stringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
- << MachineReprToString(input_representation) << " uses node #"
- << input->id() << ":" << *input->op() << " which doesn't have a "
- << MachineReprToString(representation) << " representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op() << ":"
+ << input_representation << " which doesn't have a " << representation
+ << " representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -472,6 +573,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -482,6 +584,19 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
return;
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ if (Is32()) {
+ return;
+ }
+ break;
+ case MachineRepresentation::kWord64:
+ if (Is64()) {
+ return;
+ }
+ break;
default:
break;
}
@@ -491,6 +606,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged or pointer representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -507,6 +623,7 @@ class MachineRepresentationChecker {
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
break;
}
@@ -517,6 +634,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have an int32-compatible representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -531,6 +649,7 @@ class MachineRepresentationChecker {
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
break;
}
@@ -539,9 +658,11 @@ class MachineRepresentationChecker {
break;
}
std::ostringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
- << input_representation << " uses node #" << input->id() << ":"
- << *input->op() << " which doesn't have a kWord64 representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op() << ":"
+ << input_representation
+ << " which doesn't have a kWord64 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -555,6 +676,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat32 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -568,6 +690,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat64 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -590,11 +713,11 @@ class MachineRepresentationChecker {
str << std::endl;
}
str << " * input " << i << " (" << input->id() << ":" << *input->op()
- << ") doesn't have a " << MachineReprToString(expected_input_type)
- << " representation.";
+ << ") doesn't have a " << expected_input_type << " representation.";
}
}
if (should_log_error) {
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -657,17 +780,28 @@ class MachineRepresentationChecker {
return false;
}
+ void PrintDebugHelp(std::ostream& out, Node const* node) {
+ if (DEBUG_BOOL) {
+ out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
+ << node->id() << " for debugging.";
+ }
+ }
+
Schedule const* const schedule_;
MachineRepresentationInferrer const* const inferrer_;
+ bool is_stub_;
+ const char* name_;
};
} // namespace
void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
- Linkage* linkage, Zone* temp_zone) {
+ Linkage* linkage, bool is_stub, const char* name,
+ Zone* temp_zone) {
MachineRepresentationInferrer representation_inferrer(schedule, graph,
linkage, temp_zone);
- MachineRepresentationChecker checker(schedule, &representation_inferrer);
+ MachineRepresentationChecker checker(schedule, &representation_inferrer,
+ is_stub, name);
checker.Run();
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.h b/deps/v8/src/compiler/machine-graph-verifier.h
index b7d7b6166c..26e5d772c2 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.h
+++ b/deps/v8/src/compiler/machine-graph-verifier.h
@@ -21,7 +21,8 @@ class Schedule;
class MachineGraphVerifier {
public:
static void Run(Graph* graph, Schedule const* const schedule,
- Linkage* linkage, Zone* temp_zone);
+ Linkage* linkage, bool is_stub, const char* name,
+ Zone* temp_zone);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 0ad20f0684..f7fe19d494 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -17,9 +17,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph) {}
-
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph,
+ bool allow_signalling_nan)
+ : jsgraph_(jsgraph), allow_signalling_nan_(allow_signalling_nan) {}
MachineOperatorReducer::~MachineOperatorReducer() {}
@@ -50,12 +50,12 @@ Node* MachineOperatorReducer::Float64Mul(Node* lhs, Node* rhs) {
Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
value =
graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
- return graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
- graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
- Float64Constant(-V8_INFINITY)),
- Float64Constant(V8_INFINITY),
- graph()->NewNode(machine()->Float64Sqrt(), value));
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+ Float64Constant(-V8_INFINITY)),
+ BranchHint::kFalse);
+ return d.Phi(MachineRepresentation::kFloat64, Float64Constant(V8_INFINITY),
+ graph()->NewNode(machine()->Float64Sqrt(), value));
}
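// Why the diamond (branch) is needed at all: IEEE pow(x, 0.5) returns +inf
// for x == -inf, while sqrt(-inf) is NaN; the leading "+ 0.0" additionally
// normalizes -0 to +0. A standalone check of those corner cases:
#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double inf = std::numeric_limits<double>::infinity();
  assert(std::pow(-inf, 0.5) == inf);    // pow's special case for -infinity
  assert(std::isnan(std::sqrt(-inf)));   // sqrt alone would get this wrong
  assert(std::pow(-0.0, 0.5) == 0.0);    // matches sqrt(-0.0 + 0.0)
  return 0;
}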
Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
@@ -316,14 +316,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
- if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+ if (allow_signalling_nan_ && m.right().Is(0) &&
+ (copysign(1.0, m.right().Value()) > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat32(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN - x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat32(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat32(m.left().Value() - m.right().Value());
@@ -350,7 +353,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
if (m.right().IsNaN()) { // x + NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.IsFoldable()) { // K + K => K
return ReplaceFloat64(m.left().Value() + m.right().Value());
@@ -359,14 +363,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
- if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+ if (allow_signalling_nan_ && m.right().Is(0) &&
+ (Double(m.right().Value()).Sign() > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN - x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().Value() - m.right().Value());
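// Why "x - NaN" now folds to "NaN - NaN" instead of reusing the input node:
// running an arithmetic operation on a signalling NaN yields a *quiet* NaN,
// and WebAssembly requires the quiet bit to be set in the result. Standalone
// check, assuming the usual quiet-bit convention (bit 51 for float64) and
// the default floating-point environment (no -ffast-math):
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  uint64_t snan_bits = 0x7FF0000000000001ULL;  // exponent all ones, quiet
  double snan;                                 // bit clear: signalling NaN
  std::memcpy(&snan, &snan_bits, sizeof snan);
  double quiet = snan - snan;                  // arithmetic quiets the NaN
  uint64_t quiet_bits;
  std::memcpy(&quiet_bits, &quiet, sizeof quiet);
  assert(std::isnan(quiet) && (quiet_bits & (1ULL << 51)));
  return 0;
}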
@@ -392,15 +399,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Mul: {
Float64BinopMatcher m(node);
+ if (allow_signalling_nan_ && m.right().Is(1))
+ return Replace(m.left().node()); // x * 1.0 => x
if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
node->ReplaceInput(0, Float64Constant(-0.0));
node->ReplaceInput(1, m.left().node());
NodeProperties::ChangeOp(node, machine()->Float64Sub());
return Changed(node);
}
- if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x
if (m.right().IsNaN()) { // x * NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.IsFoldable()) { // K * K => K
return ReplaceFloat64(m.left().Value() * m.right().Value());
@@ -414,12 +423,16 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Div: {
Float64BinopMatcher m(node);
- if (m.right().Is(1)) return Replace(m.left().node()); // x / 1.0 => x
+ if (allow_signalling_nan_ && m.right().Is(1))
+ return Replace(m.left().node()); // x / 1.0 => x
+ // TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN / x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // K / K => K
return ReplaceFloat64(m.left().Value() / m.right().Value());
@@ -664,6 +677,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return ReduceFloat64Compare(node);
+ case IrOpcode::kFloat64RoundDown:
+ return ReduceFloat64RoundDown(node);
default:
break;
}
@@ -841,14 +856,13 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
if (base::bits::IsPowerOfTwo32(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
- node->ReplaceInput(
- 1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
- node->ReplaceInput(2, Word32And(dividend, mask));
- NodeProperties::ChangeOp(
- node,
- common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(), dividend, zero),
+ BranchHint::kFalse);
+ return Replace(
+ d.Phi(MachineRepresentation::kWord32,
+ Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)),
+ Word32And(dividend, mask)));
} else {
Node* quotient = Int32Div(dividend, divisor);
DCHECK_EQ(dividend, node->InputAt(0));
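// The diamond above is the standard branchy form of signed modulus by a
// power of two: non-negative dividends are masked directly, negative ones
// are negated, masked, and negated back so the result keeps the dividend's
// sign (JS / C truncated-division semantics). Standalone check:
#include <cassert>
#include <cstdint>

int32_t Int32ModPowerOfTwo(int32_t dividend, uint32_t mask) {
  uint32_t x = static_cast<uint32_t>(dividend);
  // "0u - ..." wraps like the machine-level Int32Sub the reducer emits.
  return dividend < 0 ? static_cast<int32_t>(0u - ((0u - x) & mask))
                      : static_cast<int32_t>(x & mask);
}

int main() {
  for (int32_t x : {-42, -7, -1, 0, 1, 7, 42}) {
    assert(Int32ModPowerOfTwo(x, 7) == x % 8);  // divisor 8 => mask 7
  }
  return 0;
}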
@@ -1392,6 +1406,14 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) {
+ return ReplaceFloat64(Floor(m.Value()));
+ }
+ return NoChange();
+}
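// The new rule is plain constant folding: a Float64RoundDown of a constant
// input becomes the floored constant. A one-line sketch of the fold:
#include <cmath>

double FoldFloat64RoundDown(double constant_input) {
  return std::floor(constant_input);  // e.g. 3.7 -> 3.0, -0.5 -> -1.0
}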
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index d0845d9fab..593f7f2d22 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -24,7 +24,8 @@ class JSGraph;
class V8_EXPORT_PRIVATE MachineOperatorReducer final
: public NON_EXPORTED_BASE(Reducer) {
public:
- explicit MachineOperatorReducer(JSGraph* jsgraph);
+ explicit MachineOperatorReducer(JSGraph* jsgraph,
+ bool allow_signalling_nan = true);
~MachineOperatorReducer();
Reduction Reduce(Node* node) override;
@@ -96,6 +97,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
+ Reduction ReduceFloat64RoundDown(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -103,6 +105,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
MachineOperatorBuilder* machine() const;
JSGraph* jsgraph_;
+ bool allow_signalling_nan_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index e36a61e733..80310e1f5a 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -43,7 +43,8 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kStore, op->opcode());
+ DCHECK(IrOpcode::kStore == op->opcode() ||
+ IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
@@ -69,9 +70,9 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
-MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+int StackSlotSizeOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
- return OpParameter<MachineRepresentation>(op);
+ return OpParameter<int>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -241,9 +242,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
@@ -263,9 +261,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
@@ -390,7 +385,10 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
+ V(Simd128Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Simd32x4Shuffle, Operator::kNoProperties, 6, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -460,6 +458,15 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(kWord16) \
V(kWord32)
+#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+
+struct StackSlotOperator : public Operator1<int> {
+ explicit StackSlotOperator(int size)
+ : Operator1<int>(IrOpcode::kStackSlot,
+ Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
+ 0, 0, 1, 0, 0, size) {}
+};
+
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -485,56 +492,51 @@ struct MachineOperatorGlobalCache {
OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct UnalignedLoad##Type##Operator final \
- : public Operator1<UnalignedLoadRepresentation> { \
- UnalignedLoad##Type##Operator() \
- : Operator1<UnalignedLoadRepresentation>( \
- IrOpcode::kUnalignedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct ProtectedLoad##Type##Operator final \
- : public Operator1<ProtectedLoadRepresentation> { \
- ProtectedLoad##Type##Operator() \
- : Operator1<ProtectedLoadRepresentation>( \
- IrOpcode::kProtectedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
- UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type; \
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<UnalignedLoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<UnalignedLoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct CheckedLoad##Type##Operator final \
+ : public Operator1<CheckedLoadRepresentation> { \
+ CheckedLoad##Type##Operator() \
+ : Operator1<CheckedLoadRepresentation>( \
+ IrOpcode::kCheckedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 3, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
+ CheckedLoad##Type##Operator kCheckedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Type) \
- struct StackSlot##Type##Operator final \
- : public Operator1<MachineRepresentation> { \
- StackSlot##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
- "StackSlot", 0, 0, 0, 1, 0, 0, \
- MachineType::Type().representation()) {} \
- }; \
- StackSlot##Type##Operator kStackSlot##Type;
- MACHINE_TYPE_LIST(STACKSLOT)
+#define STACKSLOT(Size) \
+ struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
+ StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {} \
+ }; \
+ StackSlotOfSize##Size##Operator kStackSlotSize##Size;
+ STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
#undef STACKSLOT
#define STORE(Type) \
@@ -585,13 +587,24 @@ struct MachineOperatorGlobalCache {
"CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
} \
}; \
+ struct ProtectedStore##Type##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit ProtectedStore##Type##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kProtectedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 4, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kNoWriteBarrier)) {} \
+ }; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- CheckedStore##Type##Operator kCheckedStore##Type;
+ CheckedStore##Type##Operator kCheckedStore##Type; \
+ ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -726,15 +739,21 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
return nullptr;
}
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
-#define STACKSLOT(Type) \
- if (rep == MachineType::Type().representation()) { \
- return &cache_.kStackSlot##Type; \
+const Operator* MachineOperatorBuilder::StackSlot(int size) {
+ DCHECK_LE(0, size);
+#define CASE_CACHED_SIZE(Size) \
+ case Size: \
+ return &cache_.kStackSlotSize##Size;
+ switch (size) {
+ STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
+ default:
+ return new (zone_) StackSlotOperator(size);
}
- MACHINE_TYPE_LIST(STACKSLOT)
-#undef STACKSLOT
- UNREACHABLE();
- return nullptr;
+#undef CASE_CACHED_SIZE
+}
+
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+ return StackSlot(1 << ElementSizeLog2Of(rep));
}
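// Sketch of the caching split above: the common slot sizes from
// STACK_SLOT_CACHED_SIZES_LIST come from static cache entries, while any
// other size allocates a fresh operator (zone-allocated in V8; a vector of
// unique_ptrs stands in for the zone here):
#include <memory>
#include <vector>

struct SlotOp { int size; };

const SlotOp* StackSlotSketch(int size,
                              std::vector<std::unique_ptr<SlotOp>>* zone) {
  static const SlotOp k4{4}, k8{8}, k16{16};  // the cached sizes
  switch (size) {
    case 4:  return &k4;
    case 8:  return &k8;
    case 16: return &k16;
    default:
      zone->push_back(std::make_unique<SlotOp>(SlotOp{size}));
      return zone->back().get();
  }
}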
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
@@ -762,6 +781,23 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::ProtectedStore(
+ MachineRepresentation rep) {
+ switch (rep) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kProtectedStore##kRep; \
+ break;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
return &cache_.kUnsafePointerAdd;
}
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 1cbec994a8..d226879521 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -43,7 +43,6 @@ class OptionalOperator final {
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
-typedef LoadRepresentation ProtectedLoadRepresentation;
LoadRepresentation LoadRepresentationOf(Operator const*);
@@ -94,7 +93,7 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+int StackSlotSizeOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
@@ -448,9 +447,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float32x4LessThanOrEqual();
const Operator* Float32x4GreaterThan();
const Operator* Float32x4GreaterThanOrEqual();
- const Operator* Float32x4Select();
- const Operator* Float32x4Swizzle();
- const Operator* Float32x4Shuffle();
const Operator* Float32x4FromInt32x4();
const Operator* Float32x4FromUint32x4();
@@ -471,9 +467,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int32x4LessThanOrEqual();
const Operator* Int32x4GreaterThan();
const Operator* Int32x4GreaterThanOrEqual();
- const Operator* Int32x4Select();
- const Operator* Int32x4Swizzle();
- const Operator* Int32x4Shuffle();
const Operator* Int32x4FromFloat32x4();
const Operator* Uint32x4Min();
@@ -608,6 +601,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Simd128Or();
const Operator* Simd128Xor();
const Operator* Simd128Not();
+ const Operator* Simd32x4Select();
+ const Operator* Simd32x4Swizzle();
+ const Operator* Simd32x4Shuffle();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -615,6 +611,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
+ const Operator* ProtectedStore(MachineRepresentation rep);
// unaligned load [base + index]
const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
@@ -622,6 +619,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
+ const Operator* StackSlot(int size);
const Operator* StackSlot(MachineRepresentation rep);
// Access to the machine stack.
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 66fcbb9362..7e9a522a70 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -20,7 +20,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
- zone_(zone) {}
+ zone_(zone),
+ graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -91,7 +92,9 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
return VisitOtherEffect(node, state);
@@ -101,12 +104,17 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
+#define __ gasm()->
+
void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
+
+ gasm()->Reset(effect, control);
+
PretenureFlag pretenure = PretenureFlagOf(node->op());
// Propagate tenuring from outer allocations to inner allocations, i.e.
@@ -141,11 +149,11 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
}
// Determine the top/limit addresses.
- Node* top_address = jsgraph()->ExternalConstant(
+ Node* top_address = __ ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = jsgraph()->ExternalConstant(
+ Node* limit_address = __ ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
@@ -171,89 +179,69 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
- jsgraph()->IntPtrConstant(object_size));
- effect = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+ Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
- value = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), state->top(),
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ value = __ BitcastWordToTagged(
+ __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
// Extend the allocation {group}.
group->Add(value);
state = AllocationState::Open(group, state_size, top, zone());
} else {
+ auto call_runtime = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
+
// Setup a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
- Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+ Node* size = __ UniqueInt32Constant(object_size);
// Load allocation top and limit.
- Node* top = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
- jsgraph()->IntPtrConstant(0), effect, control);
- Node* limit = effect = graph()->NewNode(
- machine()->Load(MachineType::Pointer()), limit_address,
- jsgraph()->IntPtrConstant(0), effect, control);
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
- Node* check = graph()->NewNode(
- machine()->UintLessThan(),
- graph()->NewNode(
- machine()->IntAdd(), top,
- machine()->Is64()
- ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
- : size),
+ Node* check = __ UintLessThan(
+ __ IntAdd(top,
+ machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
limit);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = top;
+ __ GotoUnless(check, &call_runtime);
+ __ Goto(&done, top);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
+ __ Bind(&call_runtime);
{
- Node* target = pretenure == NOT_TENURED
- ? jsgraph()->AllocateInNewSpaceStubConstant()
- : jsgraph()->AllocateInOldSpaceStubConstant();
+      Node* target = pretenure == NOT_TENURED
+                         ? __ AllocateInNewSpaceStubConstant()
+                         : __ AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
- vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
- size, efalse, if_false);
- vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
- jsgraph()->IntPtrConstant(kHeapObjectTag));
+ Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+ vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+ __ Goto(&done, vfalse);
}
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(
- common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
- control);
+ __ Bind(&done);
// Compute the new top and write it back.
- top = graph()->NewNode(machine()->IntAdd(), value,
- jsgraph()->IntPtrConstant(object_size));
- effect = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+ top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
- value = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), value,
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ value = __ BitcastWordToTagged(
+ __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
// Start a new allocation group.
AllocationGroup* group =
@@ -261,61 +249,42 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
+ auto call_runtime = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedPointer);
+
// Load allocation top and limit.
- Node* top = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
- jsgraph()->IntPtrConstant(0), effect, control);
- Node* limit = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
- jsgraph()->IntPtrConstant(0), effect, control);
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
- Node* new_top = graph()->NewNode(
- machine()->IntAdd(), top,
- machine()->Is64()
- ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
- : size);
+ Node* new_top =
+ __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
// Check if we can do bump pointer allocation here.
- Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- etrue = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
- vtrue = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), top,
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ Node* check = __ UintLessThan(new_top, limit);
+ __ GotoUnless(check, &call_runtime);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), new_top);
+ __ Goto(&done, __ BitcastWordToTagged(
+ __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
+
+ __ Bind(&call_runtime);
+  Node* target = pretenure == NOT_TENURED
+                     ? __ AllocateInNewSpaceStubConstant()
+                     : __ AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
}
+ __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* target = pretenure == NOT_TENURED
- ? jsgraph()->AllocateInNewSpaceStubConstant()
- : jsgraph()->AllocateInOldSpaceStubConstant();
- if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
- Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
- }
- vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
- efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
- control);
+ __ Bind(&done);
+ value = done.PhiAt(0);
// Create an unfoldable allocation group.
AllocationGroup* group =
@@ -323,6 +292,10 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
state = AllocationState::Closed(group, zone());
}
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+ USE(control); // Floating control, dropped on the floor.
+
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
// {node} with the {value}.
@@ -340,6 +313,8 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
node->Kill();
}
+#undef __
+
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
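
The GraphAssembler rewrite above keeps the same bump-pointer scheme the removed Branch/Merge/Phi graph encoded, just expressed with structured labels. A scalar sketch of that scheme under simplified assumptions (CallAllocateStub is a hypothetical placeholder; this is not V8 code):

#include <cstddef>
#include <cstdint>

constexpr uintptr_t kHeapObjectTag = 1;  // stand-in for the tag added above

// Hypothetical slow path: trigger GC and allocate via the runtime stub.
uintptr_t CallAllocateStub(size_t /*size*/) { return 0; }

uintptr_t Allocate(uintptr_t* top, uintptr_t limit, size_t size) {
  uintptr_t new_top = *top + size;
  if (new_top < limit) {              // __ GotoUnless(check, &call_runtime)
    uintptr_t object = *top;
    *top = new_top;                   // write the bumped top back
    return object + kHeapObjectTag;   // __ BitcastWordToTagged(IntAdd(top, tag))
  }
  return CallAllocateStub(size);      // __ Bind(&call_runtime); __ Call(...)
}
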
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index ba1d6dd72b..1541d22896 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+#include "src/compiler/graph-assembler.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -131,6 +132,7 @@ class MemoryOptimizer final {
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
+ GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
@@ -138,6 +140,7 @@ class MemoryOptimizer final {
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
+ GraphAssembler graph_assembler_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 0a62b52d4f..60f634254c 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -270,6 +270,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
bool must_save_lr_;
};
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
+
+#undef CREATE_OOL_CLASS
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
@@ -1132,36 +1152,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMipsMaddS:
- __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMaddD:
- __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMipsMaddfS:
- __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMipsMaddfD:
- __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMsubS:
- __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMsubD:
- __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMipsMsubfS:
- __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMipsMsubfD:
- __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1239,47 +1247,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsFloat32Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat64Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ DoubleRegister dst = i.OutputDoubleRegister();
+ DoubleRegister src1 = i.InputDoubleRegister(0);
+ DoubleRegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat32Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat64Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ DoubleRegister dst = i.OutputDoubleRegister();
+ DoubleRegister src1 = i.InputDoubleRegister(0);
+ DoubleRegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsCvtSD: {
@@ -1628,12 +1628,12 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- MipsOperandConverter i(this, instr);
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare operations
@@ -1642,12 +1642,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
+ MipsOperandConverter i(gen, instr);
if (instr->arch_opcode() == kMipsTst) {
- cc = FlagsConditionToConditionTst(branch->condition);
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsAddOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1657,11 +1658,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+ UNSUPPORTED_COND(kMipsAddOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsSubOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1671,11 +1672,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsSubOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1685,15 +1686,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+ UNSUPPORTED_COND(kMipsMulOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
- cc = FlagsConditionToConditionCmp(branch->condition);
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMipsCmpS) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+ if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -1703,8 +1704,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -1718,7 +1719,17 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
instr->arch_opcode());
UNIMPLEMENTED();
}
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
}
@@ -1726,6 +1737,66 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ MipsOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2080,9 +2151,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ li(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ li(dst, Operand(src.ToInt32()));
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 45ed041175..edff56f72b 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -71,12 +71,8 @@ namespace compiler {
V(MipsMulPair) \
V(MipsMaddS) \
V(MipsMaddD) \
- V(MipsMaddfS) \
- V(MipsMaddfD) \
V(MipsMsubS) \
V(MipsMsubD) \
- V(MipsMsubfS) \
- V(MipsMsubfD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1e4b996531..3dcf708349 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -188,6 +188,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.TempImmediate(cont->trap_id());
}
if (cont->IsDeoptimize()) {
@@ -368,6 +370,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
@@ -652,7 +658,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -664,7 +670,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
@@ -900,35 +906,23 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
MipsOperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
- Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
- Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMipsAddS, node);
@@ -937,35 +931,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
MipsOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
- Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
- Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMipsAddD, node);
@@ -974,9 +956,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
MipsOperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMsubS, g.DefineAsRegister(node),
@@ -984,24 +966,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- if (IsMipsArchVariant(kMips32r6)) {
- // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
- Float32BinopMatcher mright(m.right().node());
- Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
      // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMsubD, g.DefineAsRegister(node),
@@ -1009,15 +982,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- if (IsMipsArchVariant(kMips32r6)) {
- // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
- Float64BinopMatcher mright(m.right().node());
- Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMipsSubD, node);
}
@@ -1406,9 +1370,12 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -1616,10 +1583,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -1643,6 +1613,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
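
A mini-model of the trap-continuation plumbing added above, with deliberately simplified types (illustrative stand-ins, not V8's declarations): VisitTrapIf/VisitTrapUnless package a Runtime::FunctionId with the condition, and the compare emitters then append it as a trailing immediate operand instead of defining a result register.

#include <cstdint>
#include <cstdio>

enum class FlagsMode { kBranch, kDeoptimize, kSet, kTrap };

struct FlagsContinuationModel {
  FlagsMode mode;
  int32_t trap_id;
  static FlagsContinuationModel ForTrap(int32_t id) {
    return {FlagsMode::kTrap, id};
  }
};

int main() {
  FlagsContinuationModel cont = FlagsContinuationModel::ForTrap(7);
  if (cont.mode == FlagsMode::kTrap) {
    // Mirrors g.TempImmediate(cont->trap_id()) in VisitCompare above.
    std::printf("emit compare with trailing immediate trap_id=%d\n", cont.trap_id);
  }
  return 0;
}
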
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index a3bf433d4a..ba921e265b 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -270,6 +270,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
bool must_save_lr_;
};
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
@@ -366,85 +386,108 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
}
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+ __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
+ Operand(zero_reg)); \
} else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
} \
- __ bind(ool->exit()); \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+ __ Or(kScratchReg, zero_reg, Operand(offset)); \
+ __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
+ __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
} else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
} \
- __ bind(ool->exit()); \
} while (0)
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
+ do { \
+ auto result = i.Output##width##Register(); \
+ auto ool = new (zone()) OutOfLineLoad##width(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
+ ool->entry()); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
+ ool->entry()); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.InputOrZeroRegister(2); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ auto value = i.InputOrZeroRegister(2); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
} while (0)
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
@@ -1326,36 +1369,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMips64MaddS:
- __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MaddD:
- __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMips64MaddfS:
- __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMips64MaddfD:
- __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MsubS:
- __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MsubD:
- __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMips64MsubfS:
- __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMips64MsubfD:
- __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1430,47 +1461,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64Float32Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float32Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64SilenceNaN:
@@ -1935,12 +1958,13 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
+ MipsOperandConverter i(gen, instr);
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- MipsOperandConverter i(this, instr);
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare operations
@@ -1950,17 +1974,17 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// they are tested here.
if (instr->arch_opcode() == kMips64Tst) {
- cc = FlagsConditionToConditionTst(branch->condition);
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- cc = FlagsConditionToConditionOvf(branch->condition);
+ cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1970,11 +1994,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64DaddOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64DsubOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1984,11 +2008,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64DsubOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow: {
__ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel, kScratchReg);
@@ -1998,15 +2022,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel, kScratchReg);
} break;
default:
- UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64MulOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
- cc = FlagsConditionToConditionCmp(branch->condition);
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMips64CmpS) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -2016,8 +2040,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -2031,7 +2055,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
instr->arch_opcode());
UNIMPLEMENTED();
}
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
}
@@ -2039,6 +2074,63 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+ void Generate() final {
+ MipsOperandConverter i(gen_, instr_);
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2401,7 +2493,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
__ li(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ li(dst, Operand(src.ToInt32()));
@@ -2411,11 +2503,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ li(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ li(dst, Operand(src.ToInt64()));
}
break;
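
The power-of-two fast path in ASSEMBLE_BOUNDS_CHECK_REGISTER above rests on a mask identity: for unsigned offset and length == 2^k, offset < length exactly when every bit at or above position k is clear. A standalone check of that identity (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

bool InBoundsByMask(uint64_t offset, uint64_t length /* power of two */) {
  return (offset & ~(length - 1)) == 0;  // the And/Branch pair in the macro
}

int main() {
  for (uint64_t offset = 0; offset < 1024; ++offset)
    assert(InBoundsByMask(offset, 256) == (offset < 256));
  return 0;
}
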
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 8f68ced62e..0c0e1aa61e 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -87,12 +87,8 @@ namespace compiler {
V(Mips64MinD) \
V(Mips64MaddS) \
V(Mips64MaddD) \
- V(Mips64MaddfS) \
- V(Mips64MaddfD) \
V(Mips64MsubS) \
V(Mips64MsubD) \
- V(Mips64MsubfS) \
- V(Mips64MsubfD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index fbf09d6ca2..d48007b858 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -92,9 +92,35 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Tst:
case kMips64Xor:
return is_uint16(value);
+ case kMips64Lb:
+ case kMips64Lbu:
+ case kMips64Sb:
+ case kMips64Lh:
+ case kMips64Lhu:
+ case kMips64Sh:
+ case kMips64Lw:
+ case kMips64Sw:
+ case kMips64Ld:
+ case kMips64Sd:
+ case kMips64Lwc1:
+ case kMips64Swc1:
case kMips64Ldc1:
case kMips64Sdc1:
- return is_int16(value + kIntSize);
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ return is_int32(value);
default:
return is_int16(value);
}
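
The widened case list means memory operands now accept any 32-bit displacement, while ALU immediates keep the 16-bit limit that MIPS encodes directly. The range helpers themselves are one-liners; a sketch (V8 has its own templated equivalents):

    #include <cstdint>

    static bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }
    static bool is_uint16(int64_t v) { return v >= 0 && v <= 65535; }
    static bool is_int32(int64_t v) {
      return v >= INT32_MIN && v <= INT32_MAX;
    }

Presumably the macro-assembler splits an offset outside the 16-bit range into a scratch-register add plus a short-offset load/store, which is why the selector can afford the looser is_int32 bound for memory opcodes.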
@@ -169,6 +195,16 @@ struct ExtendingLoadMatcher {
DCHECK(m.IsWord64Sar());
if (m.left().IsLoad() && m.right().Is(32) &&
selector_->CanCover(m.node(), m.left().node())) {
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK(ElementSizeLog2Of(rep) == 3);
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
Mips64OperandGenerator g(selector_);
Node* load = m.left().node();
Node* offset = load->InputAt(1);
@@ -186,7 +222,8 @@ struct ExtendingLoadMatcher {
}
};
-bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
ExtendingLoadMatcher m(node, selector);
Mips64OperandGenerator g(selector);
if (m.Matches()) {
@@ -196,7 +233,7 @@ bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
m.opcode() | AddressingModeField::encode(kMode_MRI);
DCHECK(is_int32(m.immediate()));
inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
- InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
inputs);
return true;
@@ -247,6 +284,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.TempImmediate(cont->trap_id());
}
if (cont->IsDeoptimize()) {
@@ -438,6 +477,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
@@ -748,7 +791,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
- if (TryEmitExtendingLoad(this, node)) return;
+ if (TryEmitExtendingLoad(this, node, node)) return;
VisitRRO(this, kMips64Dsar, node);
}
@@ -824,7 +867,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -836,7 +879,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -856,7 +899,7 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
@@ -869,7 +912,7 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -1318,13 +1361,17 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
if (CanCover(node, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64Sar: {
- Int64BinopMatcher m(value);
- if (m.right().IsInRange(32, 63)) {
- // After smi untagging no need for truncate. Combine sequence.
- Emit(kMips64Dsar, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()),
- g.UseImmediate(m.right().node()));
+ if (TryEmitExtendingLoad(this, value, node)) {
return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After smi untagging no need for truncate. Combine sequence.
+ Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
}
break;
}
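
The new output_node parameter lets VisitTruncateInt64ToInt32 (above) reuse this matcher and define the truncation itself as the narrowed load's output. A self-contained sketch of why the fold is value-preserving on a little-endian target (a big-endian target would read offset + 0 instead):

    #include <cstdint>
    #include <cstring>

    // A 64-bit load followed by an arithmetic shift right by 32 yields
    // the same value as a sign-extending 32-bit load of the upper word.
    int32_t upper_via_shift(const int64_t* p) {
      return static_cast<int32_t>(*p >> 32);
    }
    int32_t upper_via_lw(const int64_t* p) {
      int32_t hi;
      std::memcpy(&hi, reinterpret_cast<const char*>(p) + 4, sizeof(hi));
      return hi;  // equal to upper_via_shift(p) on little-endian
    }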
@@ -1404,35 +1451,23 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
Mips64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
+ if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y).
- Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z).
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z).
- Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMips64AddS, node);
@@ -1441,35 +1476,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
Mips64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
+ if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y).
- Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z).
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z).
- Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMips64AddD, node);
@@ -1478,9 +1501,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
Mips64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r2) { // Select Msub.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MsubS, g.DefineAsRegister(node),
@@ -1488,24 +1511,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- if (kArchVariant == kMips64r6) {
- // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
- Float32BinopMatcher mright(m.right().node());
- Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r2) { // Select Msub.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
// For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MsubD, g.DefineAsRegister(node),
@@ -1513,15 +1527,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- if (kArchVariant == kMips64r6) {
- // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
- Float64BinopMatcher mright(m.right().node());
- Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMips64SubD, node);
}
@@ -1849,6 +1854,15 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
: g.UseRegister(length)
: g.UseRegister(length);
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.DefineAsRegister(node), offset_operand,
+ g.UseImmediate(length), g.UseRegister(buffer));
+ return;
+ }
+ }
+
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer));
@@ -1901,6 +1915,15 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
: g.UseRegister(length)
: g.UseRegister(length);
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
+ g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
+ return;
+ }
+ }
+
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
g.UseRegister(buffer));
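
Both checked-load and checked-store now special-case a constant power-of-two length by passing it as an immediate. The diff does not show the code-generator side; one plausible reason a power of two is cheap (an assumption, not taken from this patch) is that the bounds test folds into a single mask:

    #include <cstdint>

    // For len == 2^k, "offset < len" holds iff offset's high bits are
    // clear, so an AND with ~(len - 1) replaces a register compare.
    bool in_bounds_pow2(uint32_t offset, uint32_t len_pow2) {
      return (offset & ~(len_pow2 - 1)) == 0;
    }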
@@ -1921,9 +1944,12 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -2135,6 +2161,9 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->reason(),
cont->frame_state());
+ } else if (cont->IsTrap()) {
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -2280,6 +2309,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
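
With the new IsTrap branches in VisitCompare and EmitWordCompareZero, TrapIf/TrapUnless lower to the same compare-against-zero instructions as branches, just carrying the trap id as a trailing immediate for AssembleArchTrap to consume. A toy model of the two entry points above:

    enum class Cond { kEqual, kNotEqual };

    struct TrapCheck {
      Cond cond;    // condition under which the trap fires
      int trap_id;  // runtime function id, appended as an immediate
    };

    TrapCheck LowerTrapIf(int id)     { return {Cond::kNotEqual, id}; }
    TrapCheck LowerTrapUnless(int id) { return {Cond::kEqual, id}; }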
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index 84666d5f07..e38105dd8b 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -20,11 +20,10 @@ class NodeMarkerBase {
public:
NodeMarkerBase(Graph* graph, uint32_t num_states);
- V8_INLINE Mark Get(Node* node) {
+ V8_INLINE Mark Get(const Node* node) {
Mark mark = node->mark();
if (mark < mark_min_) {
- mark = mark_min_;
- node->set_mark(mark_min_);
+ return 0;
}
DCHECK_LT(mark, mark_max_);
return mark - mark_min_;
@@ -52,9 +51,9 @@ class NodeMarkerBase {
// set to State(0) in constant time.
//
// In its current implementation, in debug mode NodeMarker will try to
-// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
-// or set a node with a NodeMarker, and then get or set that node
-// with an older NodeMarker you will get a crash.
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you set a
+// node with a NodeMarker, and then get or set that node with an older
+// NodeMarker you will get a crash.
//
// GraphReducer uses a NodeMarker, so individual Reducers cannot use a
// NodeMarker.
@@ -64,7 +63,7 @@ class NodeMarker : public NodeMarkerBase {
V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
: NodeMarkerBase(graph, num_states) {}
- V8_INLINE State Get(Node* node) {
+ V8_INLINE State Get(const Node* node) {
return static_cast<State>(NodeMarkerBase::Get(node));
}
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 646dbc209e..cc3a07d7e3 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/node-properties.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -338,6 +339,17 @@ MaybeHandle<Context> NodeProperties::GetSpecializationContext(
// static
+Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
+ Node* context = NodeProperties::GetContextInput(node);
+ while (*depth > 0 &&
+ IrOpcode::IsContextChainExtendingOpcode(context->opcode())) {
+ context = NodeProperties::GetContextInput(context);
+ (*depth)--;
+ }
+ return context;
+}
+
+// static
Type* NodeProperties::GetTypeOrAny(Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
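
GetOuterContext factors out a walk that context-specialization callers previously open-coded. A standalone analogue of its contract, with a toy context type standing in for the IrOpcode::IsContextChainExtendingOpcode check added later in this diff:

    #include <cstddef>

    struct Ctx {
      Ctx* outer;
      bool statically_extends_chain;  // stands in for the opcode check
    };

    // Walk up while the link is statically known; whatever remains in
    // *depth must still be walked at runtime by the caller.
    Ctx* GetOuter(Ctx* c, std::size_t* depth) {
      while (*depth > 0 && c->statically_extends_chain && c->outer) {
        c = c->outer;
        --(*depth);
      }
      return c;
    }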
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 23253239a1..d428160651 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -132,6 +132,11 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static MaybeHandle<Context> GetSpecializationContext(
Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
+ // Walk up the context chain from the given {node} until we reduce the {depth}
+ // to 0 or hit a node that does not extend the context chain ({depth} will be
+ // updated accordingly).
+ static Node* GetOuterContext(Node* node, size_t* depth);
+
// ---------------------------------------------------------------------------
// Type.
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index f4e7b17ed2..1410ab436c 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -404,9 +404,6 @@ Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
}
-bool Node::InputEdges::empty() const { return begin() == end(); }
-
-
Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
const_iterator result(*this);
++(*this);
@@ -414,9 +411,6 @@ Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
}
-bool Node::Inputs::empty() const { return begin() == end(); }
-
-
Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
iterator result(*this);
++(*this);
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index dc6c5dc01c..7c9f3ad26f 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -46,7 +46,7 @@ class V8_EXPORT_PRIVATE Node final {
Node* const* inputs, bool has_extensible_inputs);
static Node* Clone(Zone* zone, NodeId id, const Node* node);
- bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
+ inline bool IsDead() const;
void Kill();
const Operator* op() const { return op_; }
@@ -109,41 +109,11 @@ class V8_EXPORT_PRIVATE Node final {
int UseCount() const;
void ReplaceUses(Node* replace_to);
- class InputEdges final {
- public:
- typedef Edge value_type;
-
- class iterator;
- inline iterator begin() const;
- inline iterator end() const;
-
- bool empty() const;
-
- explicit InputEdges(Node* node) : node_(node) {}
-
- private:
- Node* node_;
- };
-
- InputEdges input_edges() { return InputEdges(this); }
-
- class V8_EXPORT_PRIVATE Inputs final {
- public:
- typedef Node* value_type;
+ class InputEdges;
+ inline InputEdges input_edges();
- class const_iterator;
- inline const_iterator begin() const;
- inline const_iterator end() const;
-
- bool empty() const;
-
- explicit Inputs(Node* node) : node_(node) {}
-
- private:
- Node* node_;
- };
-
- Inputs inputs() { return Inputs(this); }
+ class Inputs;
+ inline Inputs inputs() const;
class UseEdges final {
public:
@@ -294,7 +264,7 @@ class V8_EXPORT_PRIVATE Node final {
void set_type(Type* type) { type_ = type; }
// Only NodeMarkers should manipulate the marks on nodes.
- Mark mark() { return mark_; }
+ Mark mark() const { return mark_; }
void set_mark(Mark mark) { mark_ = mark; }
inline bool has_inline_inputs() const {
@@ -345,6 +315,48 @@ static inline const T& OpParameter(const Node* node) {
return OpParameter<T>(node->op());
}
+class Node::InputEdges final {
+ public:
+ typedef Edge value_type;
+
+ class iterator;
+ inline iterator begin() const;
+ inline iterator end() const;
+
+ bool empty() const { return count_ == 0; }
+ int count() const { return count_; }
+
+ inline value_type operator[](int index) const;
+
+ InputEdges(Node** input_root, Use* use_root, int count)
+ : input_root_(input_root), use_root_(use_root), count_(count) {}
+
+ private:
+ Node** input_root_;
+ Use* use_root_;
+ int count_;
+};
+
+class V8_EXPORT_PRIVATE Node::Inputs final {
+ public:
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
+ bool empty() const { return count_ == 0; }
+ int count() const { return count_; }
+
+ inline value_type operator[](int index) const;
+
+ explicit Inputs(Node* const* input_root, int count)
+ : input_root_(input_root), count_(count) {}
+
+ private:
+ Node* const* input_root_;
+ int count_;
+};
// An encapsulation for information associated with a single use of a node as
// an input from another node, allowing access to both the defining node and
@@ -373,6 +385,7 @@ class Edge final {
private:
friend class Node::UseEdges::iterator;
+ friend class Node::InputEdges;
friend class Node::InputEdges::iterator;
Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
@@ -385,12 +398,37 @@ class Edge final {
Node** input_ptr_;
};
+bool Node::IsDead() const {
+ Node::Inputs inputs = this->inputs();
+ return inputs.count() > 0 && inputs[0] == nullptr;
+}
+
+Node::InputEdges Node::input_edges() {
+ int inline_count = InlineCountField::decode(bit_field_);
+ if (inline_count != kOutlineMarker) {
+ return InputEdges(inputs_.inline_, reinterpret_cast<Use*>(this) - 1,
+ inline_count);
+ } else {
+ return InputEdges(inputs_.outline_->inputs_,
+ reinterpret_cast<Use*>(inputs_.outline_) - 1,
+ inputs_.outline_->count_);
+ }
+}
+
+Node::Inputs Node::inputs() const {
+ int inline_count = InlineCountField::decode(bit_field_);
+ if (inline_count != kOutlineMarker) {
+ return Inputs(inputs_.inline_, inline_count);
+ } else {
+ return Inputs(inputs_.outline_->inputs_, inputs_.outline_->count_);
+ }
+}
// A forward iterator to visit the edges for the input dependencies of a node.
class Node::InputEdges::iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef Edge value_type;
typedef Edge* pointer;
typedef Edge& reference;
@@ -410,12 +448,23 @@ class Node::InputEdges::iterator final {
return *this;
}
iterator operator++(int);
+ iterator& operator+=(difference_type offset) {
+ input_ptr_ += offset;
+ use_ -= offset;
+ return *this;
+ }
+ iterator operator+(difference_type offset) const {
+ return iterator(use_ - offset, input_ptr_ + offset);
+ }
+ difference_type operator-(const iterator& other) const {
+ return input_ptr_ - other.input_ptr_;
+ }
private:
friend class Node;
- explicit iterator(Node* from, int index = 0)
- : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
+ explicit iterator(Use* use, Node** input_ptr)
+ : use_(use), input_ptr_(input_ptr) {}
Use* use_;
Node** input_ptr_;
@@ -423,57 +472,71 @@ class Node::InputEdges::iterator final {
Node::InputEdges::iterator Node::InputEdges::begin() const {
- return Node::InputEdges::iterator(this->node_, 0);
+ return Node::InputEdges::iterator(use_root_, input_root_);
}
Node::InputEdges::iterator Node::InputEdges::end() const {
- return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+ return Node::InputEdges::iterator(use_root_ - count_, input_root_ + count_);
}
+Edge Node::InputEdges::operator[](int index) const {
+ return Edge(use_root_ + index, input_root_ + index);
+}
// A forward iterator to visit the inputs of a node.
class Node::Inputs::const_iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef Node* value_type;
- typedef Node** pointer;
- typedef Node*& reference;
+ typedef const value_type* pointer;
+ typedef value_type& reference;
- const_iterator(const const_iterator& other) : iter_(other.iter_) {}
+ const_iterator(const const_iterator& other) : input_ptr_(other.input_ptr_) {}
- Node* operator*() const { return (*iter_).to(); }
+ Node* operator*() const { return *input_ptr_; }
bool operator==(const const_iterator& other) const {
- return iter_ == other.iter_;
+ return input_ptr_ == other.input_ptr_;
}
bool operator!=(const const_iterator& other) const {
return !(*this == other);
}
const_iterator& operator++() {
- ++iter_;
+ ++input_ptr_;
return *this;
}
const_iterator operator++(int);
+ const_iterator& operator+=(difference_type offset) {
+ input_ptr_ += offset;
+ return *this;
+ }
+ const_iterator operator+(difference_type offset) const {
+ return const_iterator(input_ptr_ + offset);
+ }
+ difference_type operator-(const const_iterator& other) const {
+ return input_ptr_ - other.input_ptr_;
+ }
private:
friend class Node::Inputs;
- const_iterator(Node* node, int index) : iter_(node, index) {}
+ explicit const_iterator(Node* const* input_ptr) : input_ptr_(input_ptr) {}
- Node::InputEdges::iterator iter_;
+ Node* const* input_ptr_;
};
Node::Inputs::const_iterator Node::Inputs::begin() const {
- return const_iterator(this->node_, 0);
+ return const_iterator(input_root_);
}
Node::Inputs::const_iterator Node::Inputs::end() const {
- return const_iterator(this->node_, this->node_->InputCount());
+ return const_iterator(input_root_ + count_);
}
+Node* Node::Inputs::operator[](int index) const { return input_root_[index]; }
// A forward iterator to visit the use edges of a node.
class Node::UseEdges::iterator final {
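
The moved-out-of-line Inputs/InputEdges definitions work because a node stores its inputs either inline (small input counts) or in an out-of-line array; both cases decode to a plain (base, count) pair, which is what makes operator[] and the new random-access iterator operations O(1). A toy model of that decoding, with kOutlineMarker as a hypothetical sentinel value:

    #include <utility>

    struct NodeModel {
      static const int kOutlineMarker = -1;  // hypothetical sentinel
      int inline_count;                      // or kOutlineMarker
      NodeModel** inline_inputs;             // used when inline
      struct Outline { NodeModel** inputs; int count; }* outline;

      std::pair<NodeModel**, int> inputs() const {
        return inline_count != kOutlineMarker
                   ? std::make_pair(inline_inputs, inline_count)
                   : std::make_pair(outline->inputs, outline->count);
      }
    };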
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index fdbe001de3..1d90095769 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -25,6 +25,8 @@
V(Deoptimize) \
V(DeoptimizeIf) \
V(DeoptimizeUnless) \
+ V(TrapIf) \
+ V(TrapUnless) \
V(Return) \
V(TailCall) \
V(Terminate) \
@@ -104,7 +106,9 @@
#define JS_SIMPLE_BINOP_LIST(V) \
JS_COMPARE_BINOP_LIST(V) \
JS_BITWISE_BINOP_LIST(V) \
- JS_ARITH_BINOP_LIST(V)
+ JS_ARITH_BINOP_LIST(V) \
+ V(JSInstanceOf) \
+ V(JSOrdinaryHasInstance)
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
@@ -122,26 +126,26 @@
JS_CONVERSION_UNOP_LIST(V) \
JS_OTHER_UNOP_LIST(V)
-#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateArray) \
- V(JSCreateClosure) \
- V(JSCreateIterResultObject) \
- V(JSCreateKeyValueArray) \
- V(JSCreateLiteralArray) \
- V(JSCreateLiteralObject) \
- V(JSCreateLiteralRegExp) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSLoadGlobal) \
- V(JSStoreProperty) \
- V(JSStoreNamed) \
- V(JSStoreGlobal) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
- V(JSInstanceOf) \
- V(JSOrdinaryHasInstance)
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateClosure) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateKeyValueArray) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadGlobal) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSStoreGlobal) \
+ V(JSStoreDataPropertyInLiteral) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
+ V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
@@ -154,6 +158,7 @@
#define JS_OTHER_OP_LIST(V) \
V(JSCallConstruct) \
+ V(JSCallConstructWithSpread) \
V(JSCallFunction) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
@@ -294,6 +299,7 @@
V(PlainPrimitiveToWord32) \
V(PlainPrimitiveToFloat64) \
V(BooleanNot) \
+ V(StringCharAt) \
V(StringCharCodeAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
@@ -301,6 +307,7 @@
V(CheckIf) \
V(CheckMaps) \
V(CheckNumber) \
+ V(CheckInternalizedString) \
V(CheckString) \
V(CheckSmi) \
V(CheckHeapObject) \
@@ -322,6 +329,8 @@
V(ObjectIsSmi) \
V(ObjectIsString) \
V(ObjectIsUndetectable) \
+ V(NewRestParameterElements) \
+ V(NewUnmappedArgumentsElements) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
@@ -527,6 +536,7 @@
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
+ V(ProtectedStore) \
V(AtomicLoad) \
V(AtomicStore) \
V(UnsafePointerAdd)
@@ -553,9 +563,6 @@
V(Float32x4LessThanOrEqual) \
V(Float32x4GreaterThan) \
V(Float32x4GreaterThanOrEqual) \
- V(Float32x4Select) \
- V(Float32x4Swizzle) \
- V(Float32x4Shuffle) \
V(Float32x4FromInt32x4) \
V(Float32x4FromUint32x4) \
V(CreateInt32x4) \
@@ -574,9 +581,6 @@
V(Int32x4LessThanOrEqual) \
V(Int32x4GreaterThan) \
V(Int32x4GreaterThanOrEqual) \
- V(Int32x4Select) \
- V(Int32x4Swizzle) \
- V(Int32x4Shuffle) \
V(Int32x4FromFloat32x4) \
V(Uint32x4Min) \
V(Uint32x4Max) \
@@ -709,7 +713,10 @@
V(Simd128And) \
V(Simd128Or) \
V(Simd128Xor) \
- V(Simd128Not)
+ V(Simd128Not) \
+ V(Simd32x4Select) \
+ V(Simd32x4Swizzle) \
+ V(Simd32x4Shuffle)
#define MACHINE_SIMD_OP_LIST(V) \
MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
@@ -793,6 +800,10 @@ class V8_EXPORT_PRIVATE IrOpcode {
(kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
(kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
}
+
+ static bool IsContextChainExtendingOpcode(Value value) {
+ return kJSCreateFunctionContext <= value && value <= kJSCreateScriptContext;
+ }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 9198f4b9a9..c422f0986b 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -366,8 +366,9 @@ Type* OperationTyper::NumberExpm1(Type* type) {
Type* OperationTyper::NumberFloor(Type* type) {
DCHECK(type->Is(Type::Number()));
if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return cache_.kIntegerOrMinusZeroOrNaN;
+ type = Type::Intersect(type, Type::MinusZeroOrNaN(), zone());
+ type = Type::Union(type, cache_.kInteger, zone());
+ return type;
}
Type* OperationTyper::NumberFround(Type* type) {
@@ -624,12 +625,19 @@ Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
}
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- // Division is tricky, so all we do is try ruling out nan.
+ // Division is tricky, so all we do is try ruling out -0 and NaN.
+ bool maybe_minuszero = !lhs->Is(cache_.kPositiveIntegerOrNaN) ||
+ !rhs->Is(cache_.kPositiveIntegerOrNaN);
bool maybe_nan =
lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
- return maybe_nan ? Type::Number() : Type::OrderedNumber();
+
+ // Take into account the -0 and NaN information computed earlier.
+ Type* type = Type::PlainNumber();
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
}
Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
@@ -796,8 +804,35 @@ Type* OperationTyper::NumberShiftLeft(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- // TODO(turbofan): Infer a better type here.
- return Type::Signed32();
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToUint32(rhs);
+
+ int32_t min_lhs = lhs->Min();
+ int32_t max_lhs = lhs->Max();
+ uint32_t min_rhs = rhs->Min();
+ uint32_t max_rhs = rhs->Max();
+ if (max_rhs > 31) {
+ // rhs can be larger than the bitmask
+ max_rhs = 31;
+ min_rhs = 0;
+ }
+
+ if (max_lhs > (kMaxInt >> max_rhs) || min_lhs < (kMinInt >> max_rhs)) {
+ // overflow possible
+ return Type::Signed32();
+ }
+
+ double min =
+ std::min(static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << min_rhs),
+ static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << max_rhs));
+ double max =
+ std::max(static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << min_rhs),
+ static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << max_rhs));
+
+ if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+ return Type::Range(min, max, zone());
}
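
The shift-left typing first bails out to Signed32 whenever the shifted value could overflow 31 bits, and otherwise takes extremes over the corner cases; that is valid because x << s is monotone in x for fixed s and, once overflow is excluded, monotone in s for a fixed sign of x. A worked instance under those bounds:

    #include <algorithm>
    #include <cstdint>

    int32_t shl(int32_t v, uint32_t s) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) << s);
    }

    // lhs in [3, 5], rhs in [1, 2]: no overflow (5 <= kMaxInt >> 2), so
    // min = min(3<<1, 3<<2) = 6 and max = max(5<<1, 5<<2) = 20,
    // giving Type::Range(6, 20).
    int range_demo() {
      int32_t lo = std::min(shl(3, 1), shl(3, 2));
      int32_t hi = std::max(shl(5, 1), shl(5, 2));
      return hi - lo;  // 14
    }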
Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
@@ -809,33 +844,18 @@ Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
lhs = NumberToInt32(lhs);
rhs = NumberToUint32(rhs);
- double min = kMinInt;
- double max = kMaxInt;
- if (lhs->Min() >= 0) {
- // Right-shifting a non-negative value cannot make it negative, nor larger.
- min = std::max(min, 0.0);
- max = std::min(max, lhs->Max());
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
- }
+ int32_t min_lhs = lhs->Min();
+ int32_t max_lhs = lhs->Max();
+ uint32_t min_rhs = rhs->Min();
+ uint32_t max_rhs = rhs->Max();
+ if (max_rhs > 31) {
+ // rhs can be larger than the bitmask
+ max_rhs = 31;
+ min_rhs = 0;
}
- if (lhs->Max() < 0) {
- // Right-shifting a negative value cannot make it non-negative, nor smaller.
- min = std::max(min, lhs->Min());
- max = std::min(max, -1.0);
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
- }
- }
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- // Right-shifting by a positive value yields a small integer value.
- double shift_min = kMinInt >> static_cast<int>(rhs->Min());
- double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
- min = std::max(min, shift_min);
- max = std::min(max, shift_max);
- }
- // TODO(jarin) Ideally, the following micro-optimization should be performed
- // by the type constructor.
+ double min = std::min(min_lhs >> min_rhs, min_lhs >> max_rhs);
+ double max = std::max(max_lhs >> min_rhs, max_lhs >> max_rhs);
+
if (max == kMaxInt && min == kMinInt) return Type::Signed32();
return Type::Range(min, max, zone());
}
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 0a9e6448e2..02b2f64a30 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -78,6 +78,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
case IrOpcode::kJSDeleteProperty:
// Context operations
@@ -93,6 +94,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Call operations
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallConstructWithSpread:
case IrOpcode::kJSCallFunction:
// Misc operations
@@ -100,6 +102,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSGetSuperConstructor:
return true;
default:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index a2dc4305a3..687424b66f 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -268,28 +268,7 @@ void SetTypeForOsrValue(Node* osr_value, Node* loop,
}
}
- OsrGuardType guard_type = OsrGuardType::kAny;
- // Find the phi that uses the OsrGuard node and get the type from
- // there. Skip the search if the OsrGuard does not have value use
- // (i.e., if there is other use beyond the effect use).
- if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
- osr_guard->UseCount() > 1) {
- Type* type = nullptr;
- for (Node* use : osr_guard->uses()) {
- if (use->opcode() == IrOpcode::kPhi) {
- if (NodeProperties::GetControlInput(use) != loop) continue;
- CHECK_NULL(type);
- type = NodeProperties::GetType(use);
- }
- }
- CHECK_NOT_NULL(type);
-
- if (type->Is(Type::SignedSmall())) {
- guard_type = OsrGuardType::kSignedSmall;
- }
- }
-
- NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+ NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
}
} // namespace
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 2614155722..d0f4f18ea3 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -65,7 +65,6 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
@@ -75,6 +74,7 @@
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -111,39 +111,51 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+ is_asm_ = info->shared_info()->asm_function();
}
// For WASM compile entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
- SourcePositionTable* source_positions)
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
graph_zone_scope_(zone_stats_, ZONE_NAME),
- graph_(graph),
+ graph_(jsgraph->graph()),
source_positions_(source_positions),
+ machine_(jsgraph->machine()),
+ common_(jsgraph->common()),
+ javascript_(jsgraph->javascript()),
+ jsgraph_(jsgraph),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ protected_instructions_(protected_instructions) {
+ is_asm_ =
+ info->has_shared_info() ? info->shared_info()->asm_function() : false;
+ }
// For machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
- Schedule* schedule)
+ Schedule* schedule, SourcePositionTable* source_positions)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_(graph),
- source_positions_(new (info->zone()) SourcePositionTable(graph_)),
+ source_positions_(source_positions),
schedule_(schedule),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+ is_asm_ = false;
+ }
// For register allocation testing entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
InstructionSequence* sequence)
@@ -156,7 +168,10 @@ class PipelineData {
instruction_zone_(sequence->zone()),
sequence_(sequence),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+ is_asm_ =
+ info->has_shared_info() ? info->shared_info()->asm_function() : false;
+ }
~PipelineData() {
DeleteRegisterAllocationZone();
@@ -170,6 +185,11 @@ class PipelineData {
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
+
+ bool is_asm() const { return is_asm_; }
+ bool verify_graph() const { return verify_graph_; }
+ void set_verify_graph(bool value) { verify_graph_ = value; }
+
Handle<Code> code() { return code_; }
void set_code(Handle<Code> code) {
DCHECK(code_.is_null());
@@ -199,12 +219,6 @@ class PipelineData {
loop_assignment_ = loop_assignment;
}
- TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
- void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
- DCHECK_NULL(type_hint_analysis_);
- type_hint_analysis_ = type_hint_analysis;
- }
-
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
DCHECK(!schedule_);
@@ -233,6 +247,11 @@ class PipelineData {
source_position_output_ = source_position_output;
}
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
+ const {
+ return protected_instructions_;
+ }
+
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
@@ -240,7 +259,6 @@ class PipelineData {
graph_ = nullptr;
source_positions_ = nullptr;
loop_assignment_ = nullptr;
- type_hint_analysis_ = nullptr;
simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
@@ -293,7 +311,7 @@ class PipelineData {
DCHECK(register_allocation_data_ == nullptr);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), debug_name_.get());
+ sequence(), debug_name());
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -308,6 +326,8 @@ class PipelineData {
}
}
+ const char* debug_name() const { return debug_name_.get(); }
+
private:
Isolate* const isolate_;
CompilationInfo* const info_;
@@ -316,6 +336,8 @@ class PipelineData {
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
bool compilation_failed_ = false;
+ bool verify_graph_ = false;
+ bool is_asm_ = false;
Handle<Code> code_ = Handle<Code>::null();
// All objects in the following group of fields are allocated in graph_zone_.
@@ -325,7 +347,6 @@ class PipelineData {
Graph* graph_ = nullptr;
SourcePositionTable* source_positions_ = nullptr;
LoopAssignmentAnalysis* loop_assignment_ = nullptr;
- TypeHintAnalysis* type_hint_analysis_ = nullptr;
SimplifiedOperatorBuilder* simplified_ = nullptr;
MachineOperatorBuilder* machine_ = nullptr;
CommonOperatorBuilder* common_ = nullptr;
@@ -355,6 +376,9 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
+ nullptr;
+
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
@@ -555,27 +579,29 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (info()->shared_info()->asm_function()) {
- if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+ if (info()->osr_frame() && !info()->is_optimizing_from_bytecode()) {
+ info()->MarkAsFrameSpecializing();
+ }
info()->MarkAsFunctionContextSpecializing();
} else {
if (!FLAG_always_opt) {
info()->MarkAsBailoutOnUninitialized();
}
- if (FLAG_turbo_inlining) {
- info()->MarkAsInliningEnabled();
+ if (FLAG_turbo_loop_peeling) {
+ info()->MarkAsLoopPeelingEnabled();
}
}
- if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+ if (info()->is_optimizing_from_bytecode() ||
+ !info()->shared_info()->asm_function()) {
info()->MarkAsDeoptimizationEnabled();
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
}
if (!info()->is_optimizing_from_bytecode()) {
- if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
- info()->MarkAsTypeFeedbackEnabled();
- }
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+ } else if (FLAG_turbo_inlining) {
+ info()->MarkAsInliningEnabled();
}
linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
@@ -612,15 +638,18 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
class PipelineWasmCompilationJob final : public CompilationJob {
public:
- explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
- CallDescriptor* descriptor,
- SourcePositionTable* source_positions)
+ explicit PipelineWasmCompilationJob(
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
+ bool allow_signalling_nan)
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
- data_(&zone_stats_, info, graph, source_positions),
+ data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
pipeline_(&data_),
- linkage_(descriptor) {}
+ linkage_(descriptor),
+ allow_signalling_nan_(allow_signalling_nan) {}
protected:
Status PrepareJobImpl() final;
@@ -632,6 +661,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
+ bool allow_signalling_nan_;
};
PipelineWasmCompilationJob::Status
@@ -649,6 +679,24 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
pipeline_.RunPrintAndVerify("Machine", true);
+ if (FLAG_wasm_opt) {
+ PipelineData* data = &data_;
+ PipelineRunScope scope(data, "WASM optimization");
+ JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
+ MachineOperatorReducer machine_reducer(data->jsgraph(),
+ allow_signalling_nan_);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ pipeline_.RunPrintAndVerify("Optimized Machine", true);
+ }
if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
return SUCCEEDED;
@@ -694,20 +742,6 @@ struct LoopAssignmentAnalysisPhase {
};
-struct TypeHintAnalysisPhase {
- static const char* phase_name() { return "type hint analysis"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- if (data->info()->is_type_feedback_enabled()) {
- TypeHintAnalyzer analyzer(data->graph_zone());
- Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
- TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
- data->set_type_hint_analysis(type_hint_analysis);
- }
- }
-};
-
-
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
@@ -715,15 +749,18 @@ struct GraphBuilderPhase {
bool succeeded = false;
if (data->info()->is_optimizing_from_bytecode()) {
- BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
- data->jsgraph(), 1.0f,
- data->source_positions());
+ // Bytecode graph builder assumes deoptimization is enabled.
+ DCHECK(data->info()->is_deoptimization_enabled());
+ BytecodeGraphBuilder graph_builder(
+ temp_zone, data->info()->shared_info(),
+ handle(data->info()->closure()->feedback_vector()),
+ data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+ data->source_positions());
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), 1.0f,
- data->loop_assignment(), data->type_hint_analysis(),
- data->source_positions());
+ data->loop_assignment(), data->source_positions());
succeeded = graph_builder.CreateGraph();
}
@@ -744,9 +781,6 @@ struct InliningPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
- call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
- }
if (data->info()->is_deoptimization_enabled()) {
call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
}
@@ -817,21 +851,6 @@ struct TyperPhase {
}
};
-struct OsrTyperPhase {
- static const char* phase_name() { return "osr typer"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- NodeVector roots(temp_zone);
- data->jsgraph()->GetCachedNodes(&roots);
- // Dummy induction variable optimizer: at the moment, we do not try
- // to compute loop variable bounds on OSR.
- LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
- data->common(), temp_zone);
- Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
- typer.Run(roots, &induction_vars);
- }
-};
-
struct UntyperPhase {
static const char* phase_name() { return "untyper"; }
@@ -944,8 +963,8 @@ struct EscapeAnalysisPhase {
}
};
-struct RepresentationSelectionPhase {
- static const char* phase_name() { return "representation selection"; }
+struct SimplifiedLoweringPhase {
+ static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
@@ -978,6 +997,23 @@ struct LoopExitEliminationPhase {
}
};
+struct ConcurrentOptimizationPrepPhase {
+ static const char* phase_name() {
+ return "concurrent optimization preparation";
+ }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // Make sure we cache these code stubs.
+ data->jsgraph()->CEntryStubConstant(1);
+ data->jsgraph()->CEntryStubConstant(2);
+ data->jsgraph()->CEntryStubConstant(3);
+
+ // This is needed for escape analysis.
+ NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
+ NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
+ }
+};
+
struct GenericLoweringPhase {
static const char* phase_name() { return "generic lowering"; }
@@ -1178,21 +1214,6 @@ struct LateGraphTrimmingPhase {
};
-struct StressLoopPeelingPhase {
- static const char* phase_name() { return "stress loop peeling"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- // Peel the first outer loop for testing.
- // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
- LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
- if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
- LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
- loop_tree->outer_loops()[0], temp_zone);
- }
- }
-};
-
-
struct ComputeSchedulePhase {
static const char* phase_name() { return "scheduling"; }
@@ -1404,7 +1425,7 @@ struct GenerateCodePhase {
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
CodeGenerator generator(data->frame(), linkage, data->sequence(),
- data->info());
+ data->info(), data->protected_instructions());
data->set_code(generator.GenerateCode());
}
};
@@ -1475,8 +1496,6 @@ bool PipelineImpl::CreateGraph() {
Run<LoopAssignmentAnalysisPhase>();
}
- Run<TypeHintAnalysisPhase>();
-
Run<GraphBuilderPhase>();
if (data->compilation_failed()) {
data->EndPhaseKind();
@@ -1486,8 +1505,6 @@ bool PipelineImpl::CreateGraph() {
// Perform OSR deconstruction.
if (info()->is_osr()) {
- Run<OsrTyperPhase>();
-
Run<OsrDeconstructionPhase>();
Run<UntyperPhase>();
@@ -1512,7 +1529,7 @@ bool PipelineImpl::CreateGraph() {
// Determine the Typer operation flags.
Typer::Flags flags = Typer::kNoFlags;
if (is_sloppy(info()->shared_info()->language_mode()) &&
- !info()->shared_info()->IsBuiltin()) {
+ info()->shared_info()->IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
flags |= Typer::kThisIsReceiver;
}
@@ -1533,43 +1550,50 @@ bool PipelineImpl::CreateGraph() {
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
+ }
- if (FLAG_turbo_loop_peeling) {
- Run<LoopPeelingPhase>();
- RunPrintAndVerify("Loops peeled", true);
- } else {
- Run<LoopExitEliminationPhase>();
- RunPrintAndVerify("Loop exits eliminated", true);
- }
+ // Do some hacky things to prepare for the optimization phase.
+ // (caching handles, etc.).
+ Run<ConcurrentOptimizationPrepPhase>();
- if (FLAG_turbo_stress_loop_peeling) {
- Run<StressLoopPeelingPhase>();
- RunPrintAndVerify("Loop peeled");
- }
+ data->EndPhaseKind();
- if (!info()->shared_info()->asm_function()) {
- if (FLAG_turbo_load_elimination) {
- Run<LoadEliminationPhase>();
- RunPrintAndVerify("Load eliminated");
- }
+ return true;
+}
- if (FLAG_turbo_escape) {
- Run<EscapeAnalysisPhase>();
- if (data->compilation_failed()) {
- info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
- data->EndPhaseKind();
- return false;
- }
- RunPrintAndVerify("Escape Analysed");
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ if (data->info()->is_loop_peeling_enabled()) {
+ Run<LoopPeelingPhase>();
+ RunPrintAndVerify("Loops peeled", true);
+ } else {
+ Run<LoopExitEliminationPhase>();
+ RunPrintAndVerify("Loop exits eliminated", true);
+ }
+
+ if (!data->is_asm()) {
+ if (FLAG_turbo_load_elimination) {
+ Run<LoadEliminationPhase>();
+ RunPrintAndVerify("Load eliminated");
+ }
+
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ if (data->compilation_failed()) {
+ info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ data->EndPhaseKind();
+ return false;
}
+ RunPrintAndVerify("Escape Analysed");
}
}
- // Select representations. This has to run w/o the Typer decorator, because
- // we cannot compute meaningful types anyways, and the computed types might
- // even conflict with the representation/truncation logic.
- Run<RepresentationSelectionPhase>();
- RunPrintAndVerify("Representations selected", true);
+ // Perform simplified lowering. This has to run w/o the Typer decorator,
+ // because we cannot compute meaningful types anyways, and the computed types
+ // might even conflict with the representation/truncation logic.
+ Run<SimplifiedLoweringPhase>();
+ RunPrintAndVerify("Simplified lowering", true);
#ifdef DEBUG
// From now on it is invalid to look at types on the nodes, because:
@@ -1592,14 +1616,6 @@ bool PipelineImpl::CreateGraph() {
Run<GenericLoweringPhase>();
RunPrintAndVerify("Generic lowering", true);
- data->EndPhaseKind();
-
- return true;
-}
-
-bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
- PipelineData* data = this->data_;
-
data->BeginPhaseKind("block building");
// Run early optimization pass.
@@ -1648,7 +1664,9 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
- PipelineData data(&zone_stats, &info, graph, schedule);
+ SourcePositionTable source_positions(graph);
+ PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
+ data.set_verify_graph(FLAG_csa_verify);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
@@ -1660,6 +1678,12 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
if (FLAG_trace_turbo) {
{
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
+ }
+ {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -1696,13 +1720,16 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
- CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule) {
+Handle<Code> Pipeline::GenerateCodeForTesting(
+ CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, SourcePositionTable* source_positions) {
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(info->isolate()->allocator());
- PipelineData data(&zone_stats, info, graph, schedule);
+ // TODO(wasm): Refactor code generation to check for non-existing source
+ // table, then remove this conditional allocation.
+ if (!source_positions)
+ source_positions = new (info->zone()) SourcePositionTable(graph);
+ PipelineData data(&zone_stats, info, graph, schedule, source_positions);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
@@ -1729,10 +1756,13 @@ CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
// static
CompilationJob* Pipeline::NewWasmCompilationJob(
- CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions) {
- return new PipelineWasmCompilationJob(info, graph, descriptor,
- source_positions);
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ bool allow_signalling_nan) {
+ return new PipelineWasmCompilationJob(
+ info, jsgraph, descriptor, source_positions, protected_instructions,
+ allow_signalling_nan);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1767,12 +1797,27 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
info(), data->graph(), data->schedule()));
}
- if (FLAG_turbo_verify_machine_graph != nullptr &&
- (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
- !strcmp(FLAG_turbo_verify_machine_graph,
- data->info()->GetDebugName().get()))) {
+ bool verify_stub_graph = data->verify_graph();
+ if (verify_stub_graph ||
+ (FLAG_turbo_verify_machine_graph != nullptr &&
+ (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+ !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
+ if (FLAG_trace_csa_verify) {
+ AllowHandleDereference allow_deref;
+ CompilationInfo* info = data->info();
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "--------------------------------------------------\n"
+ << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n"
+ << *data->schedule()
+ << "--------------------------------------------------\n"
+ << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n";
+ }
Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+ data->info()->IsStub(), data->debug_name(),
&temp_zone);
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 0c0a57b286..0c3e4ea7cb 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -9,6 +9,7 @@
// Do not include anything from src/compiler here!
#include "src/globals.h"
#include "src/objects.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -17,9 +18,14 @@ class CompilationInfo;
class CompilationJob;
class RegisterConfiguration;
+namespace trap_handler {
+struct ProtectedInstructionData;
+} // namespace trap_handler
+
namespace compiler {
class CallDescriptor;
+class JSGraph;
class Graph;
class InstructionSequence;
class Schedule;
@@ -32,8 +38,11 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
- CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions);
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions,
+ bool wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
@@ -60,10 +69,10 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
- CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule = nullptr);
+ static Handle<Code> GenerateCodeForTesting(
+ CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule = nullptr,
+ SourcePositionTable* source_positions = nullptr);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index a838ede47c..56755d2446 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -34,6 +34,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case kFlags_branch:
case kFlags_deoptimize:
case kFlags_set:
+ case kFlags_trap:
return SetRC;
case kFlags_none:
return LeaveRC;
@@ -263,7 +264,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
// Overflow checked for add/sub only.
switch (op) {
#if V8_TARGET_ARCH_PPC64
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_Sub:
#endif
case kPPC_AddWithOverflow32:
@@ -276,7 +278,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kNotOverflow:
switch (op) {
#if V8_TARGET_ARCH_PPC64
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_Sub:
#endif
case kPPC_AddWithOverflow32:
@@ -761,36 +764,33 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- Register result = i.OutputRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- __ sync(); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- __ asm_instrx(result, operand); \
- } \
- __ bind(&done); \
- __ cmp(result, result); \
- __ bne(&done); \
- __ isync(); \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ lwsync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, &index); \
- Register value = i.InputRegister(index); \
- __ sync(); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- __ asm_instrx(value, operand); \
- } \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ __ lwsync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ sync(); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
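
The rewritten macros follow the conventional PowerPC acquire/release mapping: an atomic load is a plain load followed by lwsync, and an atomic store is bracketed by lwsync before and sync after. A minimal stand-alone sketch of the same barrier placement, assuming a PowerPC host with GCC-style inline asm (names here are illustrative, not part of this patch):

    #include <cstdint>
    // Acquire load: later accesses may not be reordered above the load.
    inline int32_t AcquireLoad32(const volatile int32_t* addr) {
      int32_t value = *addr;                      // lwz/lwzx in the macro
      __asm__ volatile("lwsync" ::: "memory");
      return value;
    }
    // Seq-cst store: lwsync orders earlier accesses, sync publishes the store.
    inline void SeqCstStore32(volatile int32_t* addr, int32_t value) {
      __asm__ volatile("lwsync" ::: "memory");
      *addr = value;                              // stw/stwx in the macro
      __asm__ volatile("sync" ::: "memory");
    }
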
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1322,7 +1322,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
63 - i.InputInt32(2), i.OutputRCBit());
break;
#endif
- case kPPC_Add:
+ case kPPC_Add32:
#if V8_TARGET_ARCH_PPC64
if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
ASSEMBLE_ADD_WITH_OVERFLOW();
@@ -1335,10 +1335,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
}
+ __ extsw(i.OutputRegister(), i.OutputRegister());
#if V8_TARGET_ARCH_PPC64
}
#endif
break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Add64:
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ } else {
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ }
+ break;
+#endif
case kPPC_AddWithOverflow32:
ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
@@ -1431,19 +1447,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
break;
case kPPC_Mod32:
- ASSEMBLE_MODULO(divw, mullw);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divw, mullw);
+ }
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_Mod64:
- ASSEMBLE_MODULO(divd, mulld);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divd, mulld);
+ }
break;
#endif
case kPPC_ModU32:
- ASSEMBLE_MODULO(divwu, mullw);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divwu, mullw);
+ }
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_ModU64:
- ASSEMBLE_MODULO(divdu, mulld);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divdu, mulld);
+ }
break;
#endif
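
The div/mul fallback in these cases relies on the identity remainder = a - (a / b) * b, which the mod* instructions (Power ISA 3.0, gated on the MODULO feature) compute in a single step. A sketch of what ASSEMBLE_MODULO expands to, as plain C++:

    // Hypothetical scalar equivalent of the divw + mullw + sub sequence.
    inline int32_t Mod32Fallback(int32_t a, int32_t b) {
      return a - (a / b) * b;  // modsw computes this directly when available
    }
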
case kPPC_ModDouble:
@@ -1984,6 +2016,82 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ PPCOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED, true);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+
+ ArchOpcode op = instr->arch_opcode();
+ CRegister cr = cr0;
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kPPC_CmpDouble) {
+ // check for unordered if necessary
+ if (cond == le) {
+ __ bunordered(&end, cr);
+ // Unnecessary for eq/lt since only the FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel, cr);
+ // Unnecessary for ne/ge since only the FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel, cr);
+ __ bind(&end);
+}
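
The unordered handling above is needed because a NaN operand sets only the FU bit on PPC, making "not greater" and "less or equal" diverge. A small host-side illustration of the asymmetry, assuming standard IEEE semantics:

    #include <limits>
    // With n = NaN, !(n > 1.0) is true but (n <= 1.0) is false, so a trap
    // keyed on le/gt must branch on the unordered case explicitly.
    const double n = std::numeric_limits<double>::quiet_NaN();
    const bool not_gt = !(n > 1.0);  // true  (unordered)
    const bool le = (n <= 1.0);      // false (unordered)
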
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2257,11 +2365,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_PPC64
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -2270,11 +2376,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
#endif
__ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
@@ -2313,8 +2418,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ double value;
+// A bit_cast of an sNaN is converted to a qNaN on ia32/x64.
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ intptr_t valueInt = (src.type() == Constant::kFloat32)
+ ? src.ToFloat32AsInt()
+ : src.ToFloat64AsInt();
+ if (valueInt == ((src.type() == Constant::kFloat32)
+ ? 0x7fa00000
+ : 0x7fa0000000000000)) {
+ value = bit_cast<double, int64_t>(0x7ff4000000000000L);
+ } else {
+#endif
+ value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ }
+#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination), r0);
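
The bit-pattern comparison above sidesteps the host FPU: moving a signaling NaN through a double on an ia32/x64 host quietens it, so the constant's raw bits are inspected instead. A sketch of the same test done on integer bits (the patterns match the ones checked and substituted in the hunk):

    #include <cstdint>
    // Inspect raw bits; loading the value into an FP register first could
    // already have quietened the sNaN.
    inline bool IsSignalingNaNPattern64(uint64_t bits) {
      return bits == 0x7fa0000000000000ull;  // sNaN pattern checked above
    }
    inline uint64_t QuietNaNBits64() {
      return 0x7ff4000000000000ull;          // qNaN substituted above
    }
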
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 9198bcb00c..f68ab3ae68 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -33,7 +33,8 @@ namespace compiler {
V(PPC_RotLeftAndClear64) \
V(PPC_RotLeftAndClearLeft64) \
V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
V(PPC_AddWithOverflow32) \
V(PPC_AddPair) \
V(PPC_AddDouble) \
@@ -42,7 +43,7 @@ namespace compiler {
V(PPC_SubPair) \
V(PPC_SubDouble) \
V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul32WithHigh32) \
V(PPC_Mul64) \
V(PPC_MulHigh32) \
V(PPC_MulHighU32) \
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index dee84943fa..640a7e439a 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -35,7 +35,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_RotLeftAndClear64:
case kPPC_RotLeftAndClearLeft64:
case kPPC_RotLeftAndClearRight64:
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_AddWithOverflow32:
case kPPC_AddPair:
case kPPC_AddDouble:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 768b188aaa..c7e1fa34c1 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -155,6 +155,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -339,6 +342,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -836,7 +844,7 @@ void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
}
void InstructionSelector::VisitInt32PairAdd(Node* node) {
- VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
+ VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
@@ -1013,13 +1021,13 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif
@@ -1481,11 +1489,11 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
&cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
}
@@ -1530,9 +1538,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1693,7 +1704,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
kInt16Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1782,6 +1793,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
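
ForTrap(kNotEqual, ...) versus ForTrap(kEqual, ...) encodes the two polarities: TrapIf fires when its input is non-zero, TrapUnless when it is zero. Spelled out as plain control flow (Trap() is a hypothetical stand-in for the trap runtime call):

    void Trap();  // stand-in, not part of this patch
    void TrapIfSemantics(int32_t condition) {
      if (condition != 0) Trap();   // kNotEqual against zero
    }
    void TrapUnlessSemantics(int32_t condition) {
      if (condition == 0) Trap();   // kEqual against zero
    }
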
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 14695c11b8..a318dd02ae 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -4,10 +4,10 @@
#include "src/compiler/raw-machine-assembler.h"
-#include "src/code-factory.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -51,12 +51,12 @@ Schedule* RawMachineAssembler::Export() {
os << *schedule_;
}
schedule_->EnsureCFGWellFormedness();
+ Scheduler::ComputeSpecialRPO(zone(), schedule_);
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
os << *schedule_;
}
- Scheduler::ComputeSpecialRPO(zone(), schedule_);
// Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
schedule_ = nullptr;
@@ -170,295 +170,28 @@ void RawMachineAssembler::Comment(const char* msg) {
AddNode(machine()->Comment(msg));
}
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
- Node** args) {
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 1;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- return AddNode(common()->Call(desc), input_count, buffer);
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
+ Node* const* inputs) {
+ DCHECK(!desc->NeedsFrameState());
+ // +1 is for target.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+ return AddNode(common()->Call(desc), input_count, inputs);
}
-
Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
- Node* function, Node** args,
- Node* frame_state) {
+ int input_count,
+ Node* const* inputs) {
DCHECK(desc->NeedsFrameState());
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 2;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- buffer[index++] = frame_state;
- return AddNode(common()->Call(desc), input_count, buffer);
-}
-
-Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
- Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(0);
-
- return AddNode(common()->Call(descriptor), centry, ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg1, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(1);
-
- return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
-}
-
-
-Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(2);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
- context);
-}
-
-Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(3);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
- arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(4);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
- ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(5);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
- arg5, ref, arity, context);
-}
-
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
- Node** args) {
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 1;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
+ // +2 is for target and frame state.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 2);
+ return AddNode(common()->Call(desc), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
- Node* context) {
- const int kArity = 0;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
- Node* arg1, Node* context) {
- const int kArity = 1;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-
-Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
- Node* arg1, Node* arg2,
- Node* context) {
- const int kArity = 2;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* context) {
- const int kArity = 3;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* context) {
- const int kArity = 4;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5,
- Node* context) {
- const int kArity = 5;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* arg6,
- Node* context) {
- const int kArity = 6;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4,
- arg5, arg6, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
+ Node* const* inputs) {
+ // +1 is for target.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+ Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
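
Under the new contract the target node (and, for CallNWithFrameState, the frame state) travel inside the single {inputs} array instead of separate parameters, with the DCHECKs enforcing the +1/+2 arithmetic. A hypothetical caller, assuming a RawMachineAssembler {m} and already-built descriptor and argument nodes:

    Node* inputs[] = {target, arg0, arg1};       // ParameterCount() + 1
    Node* call = m.CallN(call_descriptor, arraysize(inputs), inputs);

    Node* with_fs[] = {target, arg0, arg1, frame_state};  // + 2
    Node* call_fs =
        m.CallNWithFrameState(fs_descriptor, arraysize(with_fs), with_fs);
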
@@ -502,6 +235,21 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0, arg1);
}
+Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type,
+ MachineType arg2_type, Node* function,
+ Node* arg0, Node* arg1, Node* arg2) {
+ MachineSignature::Builder builder(zone(), 1, 3);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+}
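
Usage of the new three-argument helper mirrors CallCFunction2; a hypothetical call to a C function with signature int f(void*, int, int):

    Node* result = m.CallCFunction3(
        MachineType::Int32(),    // return type
        MachineType::Pointer(),  // arg0 type
        MachineType::Int32(),    // arg1 type
        MachineType::Int32(),    // arg2 type
        function, arg0, arg1, arg2);
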
Node* RawMachineAssembler::CallCFunction8(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 6d2accb861..af36b8c08a 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -653,6 +653,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Float64RoundTiesEven(Node* a) {
return AddNode(machine()->Float64RoundTiesEven().op(), a);
}
+ Node* Word32ReverseBytes(Node* a) {
+ return AddNode(machine()->Word32ReverseBytes().op(), a);
+ }
+ Node* Word64ReverseBytes(Node* a) {
+ return AddNode(machine()->Word64ReverseBytes().op(), a);
+ }
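
These wrappers expose the machine-level byte reversal, e.g. 0x11223344 becomes 0x44332211. The scalar equivalent, for reference only:

    #include <cstdint>
    inline uint32_t ReverseBytes32(uint32_t x) {
      return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
             ((x & 0x00ff0000u) >> 8) | ((x & 0xff000000u) >> 24);
    }
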
// Float64 bit operations.
Node* Float64ExtractLowWord32(Node* a) {
@@ -701,26 +707,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Call a given call descriptor and the given arguments.
- Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+ // The call target is passed as part of the {inputs} array.
+ Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
// Call a given call descriptor and the given arguments and frame-state.
- Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
- Node* frame_state);
- // Call to a runtime function with zero arguments.
- Node* CallRuntime0(Runtime::FunctionId function, Node* context);
- // Call to a runtime function with one arguments.
- Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
- // Call to a runtime function with two arguments.
- Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* context);
- // Call to a runtime function with three arguments.
- Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* context);
- // Call to a runtime function with four arguments.
- Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* context);
- // Call to a runtime function with five arguments.
- Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* context);
+ // The call target and frame state are passed as part of the {inputs} array.
+ Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+ Node* const* inputs);
+
+ // Tail call a given call descriptor and the given arguments.
+ // The call target is passed as part of the {inputs} array.
+ Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -730,6 +728,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
Node* arg1);
+ // Call to a C function with three arguments.
+ Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2);
// Call to a C function with eight arguments.
Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
@@ -739,30 +741,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
- // Tail call the given call descriptor and the given arguments.
- Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
- // Tail call to a runtime function with zero arguments.
- Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
- // Tail call to a runtime function with one argument.
- Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
- Node* context);
- // Tail call to a runtime function with two arguments.
- Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* context);
- // Tail call to a runtime function with three arguments.
- Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* context);
- // Tail call to a runtime function with four arguments.
- Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* context);
- // Tail call to a runtime function with five arguments.
- Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* context);
- // Tail call to a runtime function with six arguments.
- Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* arg6,
- Node* context);
-
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
// the current basic block or create new basic blocks for labels.
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 6dcf2bf4cf..707752f364 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -16,11 +16,13 @@ RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
RedundancyElimination::~RedundancyElimination() {}
Reduction RedundancyElimination::Reduce(Node* node) {
+ if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckFloat64Hole:
case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
+ case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
@@ -36,6 +38,11 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedUint32ToInt32:
return ReduceCheckNode(node);
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ // For increments and decrements by a constant, try to learn from the last
+ // bounds check.
+ return TryReuseBoundsCheckForFirstInput(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
@@ -114,7 +121,14 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
bool IsCompatibleCheck(Node const* a, Node const* b) {
- if (a->op() != b->op()) return false;
+ if (a->op() != b->op()) {
+ if (a->opcode() == IrOpcode::kCheckInternalizedString &&
+ b->opcode() == IrOpcode::kCheckString) {
+ // CheckInternalizedString(node) implies CheckString(node)
+ } else {
+ return false;
+ }
+ }
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
if (a->InputAt(i) != b->InputAt(i)) return false;
}
@@ -133,6 +147,17 @@ Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
return nullptr;
}
+Node* RedundancyElimination::EffectPathChecks::LookupBoundsCheckFor(
+ Node* node) const {
+ for (Check const* check = head_; check != nullptr; check = check->next) {
+ if (check->node->opcode() == IrOpcode::kCheckBounds &&
+ check->node->InputAt(0) == node) {
+ return check->node;
+ }
+ }
+ return nullptr;
+}
+
RedundancyElimination::EffectPathChecks const*
RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
size_t const id = node->id();
@@ -158,10 +183,41 @@ Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
ReplaceWithValue(node, check);
return Replace(check);
}
+
// Learn from this check.
return UpdateChecks(node, checks->AddCheck(zone(), node));
}
+Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ node->opcode() == IrOpcode::kSpeculativeNumberSubtract);
+
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ // Only use bounds checks for increments/decrements by a constant.
+ if (right->opcode() == IrOpcode::kNumberConstant) {
+ if (Node* bounds_check = checks->LookupBoundsCheckFor(left)) {
+ // Only use the bounds checked type if it is better.
+ if (NodeProperties::GetType(bounds_check)
+ ->Is(NodeProperties::GetType(left))) {
+ node->ReplaceInput(0, bounds_check);
+ }
+ }
+ }
+
+ return UpdateChecks(node, checks);
+}
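
The rewrite targets index arithmetic such as i + 1 where {i} already flowed through a CheckBounds on the same effect path; reusing the check node only ever narrows the type. Schematic IR, not literal compiler output:

    // before: SpeculativeNumberAdd(i, #1)                  i typed Number
    // after:  SpeculativeNumberAdd(CheckBounds(i, n), #1)  typed Range(0, n-1)
    // The guard GetType(bounds_check)->Is(GetType(left)) ensures the input is
    // only replaced when the checked type is at least as precise.
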
+
Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
if (control->opcode() == IrOpcode::kLoop) {
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 88f9032a84..786c9608df 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -34,6 +34,7 @@ class RedundancyElimination final : public AdvancedReducer {
EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
Node* LookupCheck(Node* node) const;
+ Node* LookupBoundsCheckFor(Node* node) const;
private:
EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
@@ -62,6 +63,8 @@ class RedundancyElimination final : public AdvancedReducer {
Reduction TakeChecksFromFirstEffect(Node* node);
Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
+ Reduction TryReuseBoundsCheckForFirstInput(Node* node);
+
Zone* zone() const { return zone_; }
PathChecksForEffectNodes node_checks_;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index cefd04af1f..5a2ed93827 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -300,6 +300,27 @@ void BlockAssessments::DropRegisters() {
}
}
+void BlockAssessments::Print() const {
+ OFStream os(stdout);
+ for (const auto pair : map()) {
+ const InstructionOperand op = pair.first;
+ const Assessment* assessment = pair.second;
+ // Use operator<< so we can write the assessment on the same
+ // line. Since we need a register configuration, just pick
+ // Turbofan for now.
+ PrintableInstructionOperand wrapper = {RegisterConfiguration::Turbofan(),
+ op};
+ os << wrapper << " : ";
+ if (assessment->kind() == AssessmentKind::Final) {
+ os << "v" << FinalAssessment::cast(assessment)->virtual_register();
+ } else {
+ os << "P";
+ }
+ os << std::endl;
+ }
+ os << std::endl;
+}
+
BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
const InstructionBlock* block) {
RpoNumber current_block_id = block->rpo_number();
@@ -352,8 +373,9 @@ void RegisterAllocatorVerifier::ValidatePendingAssessment(
// for the original operand (the one where the assessment was created for
// first) are also pending. To avoid recursion, we use a work list. To
// deal with cycles, we keep a set of seen nodes.
- ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
- ZoneSet<RpoNumber> seen(zone());
+ Zone local_zone(zone()->allocator(), ZONE_NAME);
+ ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(&local_zone);
+ ZoneSet<RpoNumber> seen(&local_zone);
worklist.push(std::make_pair(assessment, virtual_register));
seen.insert(block_id);
@@ -448,7 +470,11 @@ void RegisterAllocatorVerifier::ValidateFinalAssessment(
// is virtual_register.
const PendingAssessment* old = assessment->original_pending_assessment();
CHECK_NOT_NULL(old);
- ValidatePendingAssessment(block_id, op, current_assessments, old,
+ RpoNumber old_block = old->origin()->rpo_number();
+ DCHECK_LE(old_block, block_id);
+ BlockAssessments* old_block_assessments =
+ old_block == block_id ? current_assessments : assessments_[old_block];
+ ValidatePendingAssessment(old_block, op, old_block_assessments, old,
virtual_register);
}
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 0ed479fa99..5515843612 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -2985,7 +2985,7 @@ void LinearScanAllocator::FindFreeRegistersForRange(
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
DCHECK_GE(positions.length(), num_regs);
- for (int i = 0; i < num_regs; i++) {
+ for (int i = 0; i < num_regs; ++i) {
positions[i] = LifetimePosition::MaxPosition();
}
@@ -3009,9 +3009,17 @@ void LinearScanAllocator::FindFreeRegistersForRange(
for (LiveRange* cur_inactive : inactive_live_ranges()) {
DCHECK(cur_inactive->End() > range->Start());
+ int cur_reg = cur_inactive->assigned_register();
+      // No need to carry out intersections when this register won't be
+      // interesting to this range anyway.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ positions[cur_reg] < range->Start()) {
+ continue;
+ }
+
LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
if (kSimpleFPAliasing || !check_fp_aliasing()) {
positions[cur_reg] = Min(positions[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
@@ -3111,8 +3119,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(
const int* codes = allocatable_register_codes();
MachineRepresentation rep = current->representation();
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kSimd128))
+ rep == MachineRepresentation::kSimd128)) {
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+ }
DCHECK_GE(free_until_pos.length(), num_codes);
@@ -3166,6 +3175,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
rep == MachineRepresentation::kSimd128))
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  // use_pos tracks the positions at which a register/alias is used.
+  // block_pos tracks the positions starting at which a register/alias is
+  // blocked.
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_regs; i++) {
@@ -3181,6 +3193,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
+ DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
+ block_pos[cur_reg]);
use_pos[cur_reg] =
range->NextLifetimePositionRegisterIsBeneficial(current->Start());
}
@@ -3196,7 +3210,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition::GapFromInstructionIndex(0);
} else {
use_pos[aliased_reg] =
- range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+ Min(block_pos[aliased_reg],
+ range->NextLifetimePositionRegisterIsBeneficial(
+ current->Start()));
}
}
}
@@ -3204,10 +3220,23 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
for (LiveRange* range : inactive_live_ranges()) {
DCHECK(range->End() > current->Start());
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
bool is_fixed = range->TopLevel()->IsFixed();
+
+ // Don't perform costly intersections if they are guaranteed to not update
+ // block_pos or use_pos.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+ if (is_fixed) {
+ if (block_pos[cur_reg] < range->Start()) continue;
+ } else {
+ if (use_pos[cur_reg] < range->Start()) continue;
+ }
+ }
+
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+
if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
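
Both allocator hunks apply the same trick: FirstIntersection() walks a range's use intervals linearly, so it is skipped whenever the tracked position for cur_reg already precedes the inactive range's start. A schematic of why skipping is sound:

    // next_intersection always lies inside the inactive {range}, hence
    //   next_intersection >= range->Start() > positions[cur_reg]
    // so Min(positions[cur_reg], next_intersection) == positions[cur_reg];
    // the costly intersection could not have changed the stored value.
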
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 4d002cc3c6..7a5a43e61a 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
@@ -587,33 +588,33 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
- if (output_type->Is(Type::Unsigned32())) {
- op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- op = machine()->TruncateFloat64ToWord32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type->Is(Type::Unsigned32())) {
- op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- op = machine()->TruncateFloat64ToWord32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
if (output_type->Is(Type::Signed32())) {
@@ -627,16 +628,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
- if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->ChangeTaggedToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
- } else {
- op = simplified()->TruncateTaggedToWord32();
- }
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedSignedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
@@ -644,6 +637,14 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ if (output_type->Is(Type::NumberOrOddball())) {
+ op = simplified()->TruncateTaggedToWord32();
+ } else if (use_info.type_check() != TypeCheckKind::kNone) {
+ op = simplified()->CheckedTruncateTaggedToWord32();
+ }
}
} else if (output_rep == MachineRepresentation::kWord32) {
// Only the checked case should get here, the non-checked case is
@@ -694,8 +695,12 @@ Node* RepresentationChanger::GetBitRepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
+ HeapObjectMatcher m(node);
+ if (m.Is(factory()->false_value())) {
+ return jsgraph()->Int32Constant(0);
+ } else if (m.Is(factory()->true_value())) {
+ return jsgraph()->Int32Constant(1);
+ }
}
default:
break;
@@ -812,6 +817,24 @@ const Operator* RepresentationChanger::Int32OverflowOperatorFor(
}
}
+const Operator* RepresentationChanger::TaggedSignedOperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kSpeculativeNumberLessThan:
+ return machine()->Is32() ? machine()->Int32LessThan()
+ : machine()->Int64LessThan();
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return machine()->Is32() ? machine()->Int32LessThanOrEqual()
+ : machine()->Int64LessThanOrEqual();
+ case IrOpcode::kSpeculativeNumberEqual:
+ return machine()->Is32() ? machine()->Word32Equal()
+ : machine()->Word64Equal();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
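
Comparing two Smis through their raw tagged words is sound because tagging is a fixed left shift, which preserves signed order within the Smi range. Schematically (assuming kSmiTag == 0):

    //   tagged(a) = a << kSmiShift
    //   a < b  <=>  (a << kSmiShift) < (b << kSmiShift)   for valid Smis,
    // so Int32/Int64LessThan on the words implements the Smi comparison.
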
+
const Operator* RepresentationChanger::Uint32OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index d7895da825..4fa7d917b7 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -238,6 +238,7 @@ class RepresentationChanger final {
UseInfo use_info);
const Operator* Int32OperatorFor(IrOpcode::Value opcode);
const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
+ const Operator* TaggedSignedOperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
const Operator* Float64OperatorFor(IrOpcode::Value opcode);
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 5dcc82f7a0..f99ab37838 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -119,6 +119,16 @@ class S390OperandConverter final : public InstructionOperandConverter {
InstructionOperand* op = instr_->InputAt(index);
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
+
+ MemOperand InputStackSlot32(size_t index) {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+    // We want to read the low-order 32 bits directly from memory.
+ MemOperand mem = InputStackSlot(index);
+ return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ return InputStackSlot(index);
+#endif
+ }
};
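
On a big-endian 64-bit target the low-order word of a spilled 64-bit slot sits at byte offset 4, which is what the +4 bias restores for 32-bit reloads. A host-side sketch of the biased read (assumes a big-endian host; names are illustrative):

    #include <cstdint>
    #include <cstring>
    // The slot holds a 64-bit spill; we want its low 32 bits.
    inline int32_t LoadLow32BigEndian(const uint64_t* slot) {
      int32_t low;
      std::memcpy(&low, reinterpret_cast<const char*>(slot) + 4, sizeof(low));
      return low;  // mirrors MemOperand(mem.rb(), mem.rx(), offset + 4) above
    }
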
static inline bool HasRegisterInput(Instruction* instr, int index) {
@@ -335,9 +345,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
} while (0)
-#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
- do { \
- __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
} while (0)
// Divide instruction dr will implicitly use register pair
@@ -1223,25 +1233,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Not64:
__ Not64(i.OutputRegister(), i.InputRegister(0));
break;
- case kS390_RotLeftAndMask32:
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- int shiftAmount = i.InputInt32(1);
- int endBit = 63 - i.InputInt32(3);
- int startBit = 63 - i.InputInt32(2);
- __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
- __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
- Operand(endBit), Operand::Zero(), true);
- } else {
- int shiftAmount = i.InputInt32(1);
- int clearBitLeft = 63 - i.InputInt32(2);
- int clearBitRight = i.InputInt32(3);
- __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
- __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
- __ srlg(i.OutputRegister(), i.OutputRegister(),
- Operand((clearBitLeft + clearBitRight)));
- __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
- }
- break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -1357,16 +1348,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else if (HasImmediateInput(instr, 1)) {
__ Mul32(i.InputRegister(0), i.InputImmediate(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // msy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ Mul32(i.InputRegister(0), kScratchReg);
-#else
- __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
-#endif
+ __ Mul32(i.InputRegister(0), i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1387,16 +1369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ mr_z(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // mfy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ mr_z(r0, kScratchReg);
-#else
- __ mfy(r0, i.InputStackSlot(1));
-#endif
+ __ mfy(r0, i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1413,16 +1386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ mlr(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // mfy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ mlr(r0, kScratchReg);
-#else
- __ ml(r0, i.InputStackSlot(1));
-#endif
+ __ ml(r0, i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1692,21 +1656,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_Tst32:
if (HasRegisterInput(instr, 1)) {
- __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+ __ lr(r0, i.InputRegister(0));
+ __ nr(r0, i.InputRegister(1));
} else {
- __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ Operand opnd = i.InputImmediate(1);
+ if (is_uint16(opnd.immediate())) {
+ __ tmll(i.InputRegister(0), opnd);
+ } else {
+ __ lr(r0, i.InputRegister(0));
+ __ nilf(r0, opnd);
+ }
}
- __ LoadAndTestP_ExtendSrc(r0, r0);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Tst64:
if (HasRegisterInput(instr, 1)) {
__ AndP(r0, i.InputRegister(0), i.InputRegister(1));
} else {
- __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ Operand opnd = i.InputImmediate(1);
+ if (is_uint16(opnd.immediate())) {
+ __ tmll(i.InputRegister(0), opnd);
+ } else {
+ __ AndP(r0, i.InputRegister(0), opnd);
+ }
}
break;
-#endif
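The rewritten Tst32/Tst64 cases above pick the cheaper test-under-mask form when the immediate fits in 16 bits: tmll sets the condition code from register & mask without writing a result register. A hedged sketch of the selection rule (UseTestUnderMask is illustrative, not a V8 helper):

    bool UseTestUnderMask(uint64_t mask) { return mask <= 0xFFFF; }
    // e.g. mask 0x00FF   -> tmll reg, 0x00FF    (no scratch register)
    //      mask 0xFF0000 -> lr r0, reg; nilf r0, 0xFF0000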
case kS390_Float64SilenceNaN: {
DoubleRegister value = i.InputDoubleRegister(0);
DoubleRegister result = i.OutputDoubleRegister();
@@ -2152,6 +2125,82 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ S390OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+
+ ArchOpcode op = instr->arch_opcode();
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kS390_CmpDouble) {
+ // Check for unordered if necessary.
+ if (cond == le) {
+ __ bunordered(&end);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel);
+ __ bind(&end);
+}
+
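The unordered fixups in AssembleArchTrap encode ordinary IEEE-754 comparison semantics: any comparison with a NaN operand is unordered, so `a <= b` is false on NaN while its negation is true. Sketched as comments (illustrative restatement, not additional code):

    // trap if (a <= b): NaN makes the predicate false, so the trap must
    //                   NOT fire -> route unordered past it: bunordered(&end)
    // trap if (a >  b): the negation of (a <= b), which NaN makes true,
    //                   so the trap MUST fire -> bunordered(tlabel)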
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
@@ -2377,11 +2426,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_S390X
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -2390,11 +2437,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ mov(dst, Operand(src.ToInt64()));
}
#else
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index 80e1532adb..ad5d7cbc74 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -31,7 +31,6 @@ namespace compiler {
V(S390_RotRight64) \
V(S390_Not32) \
V(S390_Not64) \
- V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 5ebe489e39..8fc1cfb8be 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -32,7 +32,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_RotRight64:
case kS390_Not32:
case kS390_Not64:
- case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index eed08a9c44..d906c17fbe 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -261,6 +261,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -440,6 +443,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -539,6 +547,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
g.UseOperand(length, kUint32Imm), g.UseRegister(value));
}
+#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation32(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
@@ -549,6 +558,7 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
*me = mask_lsb;
return true;
}
+#endif
#if V8_TARGET_ARCH_S390X
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
@@ -564,36 +574,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
#endif
void InstructionSelector::VisitWord32And(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- int mb = 0;
- int me = 0;
- if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
- int sh = 0;
- Node* left = m.left().node();
- if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
- CanCover(node, left)) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 31)) {
- left = mleft.left().node();
- sh = mleft.right().Value();
- if (m.left().IsWord32Shr()) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
- } else {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (me < sh) me = sh;
- }
- }
- }
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
- g.TempImmediate(me));
- return;
- }
- }
VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
}
@@ -685,25 +665,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
#endif
void InstructionSelector::VisitWord32Shl(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
- int mb;
- int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (me < sh) me = sh;
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
- g.TempImmediate(mb), g.TempImmediate(me));
- return;
- }
- }
- }
VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
}
@@ -752,26 +713,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
#endif
void InstructionSelector::VisitWord32Shr(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
- int mb;
- int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
- g.TempImmediate(mb), g.TempImmediate(me));
- return;
- }
- }
- }
VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
}
@@ -1541,9 +1482,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1620,9 +1564,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
+ case IrOpcode::kWord32Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kS390_Tst64, cont,
+ true, kUint32Imm);
+ default:
+ break;
+ }
+ }
+ }
return VisitWord32Compare(selector, value, cont);
+ }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
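The combining added to kWord32Equal above (and mirrored for kWord64Equal below) rests on two identities; a worked sketch:

    //   (a - b) == 0   <=>   a == b      -> branch can reuse the compare
    //   (a & m) == 0   <=>   test-under-mask reports "all bits zero"
    // e.g.  if ((x & 0xFF) == 0) { ... }
    // can become a single test + branch instead of and, compare-with-0,
    // branch.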
@@ -1636,9 +1598,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_S390X
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(selector, value, kS390_Tst64, cont,
+ true, kUint32Imm);
+ default:
+ break;
+ }
+ }
+ }
return VisitWord64Compare(selector, value, cont);
+ }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
@@ -1781,6 +1761,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
S390OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index eb3dda8c26..dcc84b31ed 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -407,7 +407,7 @@ void Schedule::PropagateDeferredMark() {
if (!block->deferred()) {
bool deferred = block->PredecessorCount() > 0;
for (auto pred : block->predecessors()) {
- if (!pred->deferred()) {
+ if (!pred->deferred() && (pred->rpo_number() < block->rpo_number())) {
deferred = false;
}
}
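The added rpo_number comparison makes back edges inert when propagating deferred marks: only predecessors already visited in reverse-postorder may veto the mark, otherwise a loop header could be kept non-deferred by its own unprocessed back edge. The rule, restated as a hypothetical helper (uses the real BasicBlock accessors, not compilable standalone):

    bool StaysDeferred(const BasicBlock* block) {
      if (block->PredecessorCount() == 0) return false;
      for (BasicBlock* pred : block->predecessors()) {
        bool back_edge = pred->rpo_number() >= block->rpo_number();
        if (!pred->deferred() && !back_edge) return false;  // un-defer
      }
      return true;
    }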
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index c5a94b4297..a11d8bc4cc 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -58,6 +58,9 @@ void SimdScalarLowering::LowerGraph() {
// that they are processed after all other nodes.
PreparePhiReplacement(input);
stack_.push_front({input, 0});
+ } else if (input->opcode() == IrOpcode::kEffectPhi ||
+ input->opcode() == IrOpcode::kLoop) {
+ stack_.push_front({input, 0});
} else {
stack_.push_back({input, 0});
}
@@ -70,12 +73,14 @@ void SimdScalarLowering::LowerGraph() {
#define FOREACH_INT32X4_OPCODE(V) \
V(Int32x4Add) \
V(Int32x4ExtractLane) \
- V(CreateInt32x4)
+ V(CreateInt32x4) \
+ V(Int32x4ReplaceLane)
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(Float32x4Add) \
V(Float32x4ExtractLane) \
- V(CreateFloat32x4)
+ V(CreateFloat32x4) \
+ V(Float32x4ReplaceLane)
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
@@ -102,7 +107,7 @@ static int GetParameterIndexAfterLowering(
// In function calls, the simd128 types are passed as 4 Int32 types. The
// parameters are typecast to the types as needed for various operations.
int result = old_index;
- for (int i = 0; i < old_index; i++) {
+ for (int i = 0; i < old_index; ++i) {
if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
result += 3;
}
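Each kSimd128 parameter is lowered into four kWord32 parameters, so an old index moves right by 3 for every simd parameter preceding it; a worked example with an assumed signature:

    // params:         (int32, simd128, int32)
    // old indices:      0       1        2
    // after lowering: (int32, w32 w32 w32 w32, int32)
    // new indices:      0      1   2   3   4     5
    // GetParameterIndexAfterLowering(sig, 2) == 2 + 3 == 5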
@@ -123,7 +128,7 @@ int SimdScalarLowering::GetParameterCountAfterLowering() {
static int GetReturnCountAfterLowering(
Signature<MachineRepresentation>* signature) {
int result = static_cast<int>(signature->return_count());
- for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+ for (int i = 0; i < static_cast<int>(signature->return_count()); ++i) {
if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
result += 3;
}
@@ -131,6 +136,100 @@ static int GetReturnCountAfterLowering(
return result;
}
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
+ new_indices[0] = index;
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(
+ static_cast<int>(i) * kLaneWidth)));
+ }
+}
+
+void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
+ const Operator* load_op) {
+ if (rep == MachineRepresentation::kSimd128) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* indices[kMaxLanes];
+ GetIndexNodes(index, indices);
+ Node* rep_nodes[kMaxLanes];
+ rep_nodes[0] = node;
+ NodeProperties::ChangeOp(rep_nodes[0], load_op);
+ if (node->InputCount() > 2) {
+ DCHECK(node->InputCount() > 3);
+ Node* effect_input = node->InputAt(2);
+ Node* control_input = node->InputAt(3);
+ rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
+ control_input);
+ rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
+ control_input);
+ rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
+ control_input);
+ rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
+ } else {
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
+ }
+ }
+ ReplaceNode(node, rep_nodes);
+ } else {
+ DefaultLowering(node);
+ }
+}
+
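With kLaneWidth = 16 / kMaxLanes = 4, LowerLoadOp above turns one 128-bit load into four word loads at byte offsets 0, 4, 8 and 12, and threads the effect chain backwards so the reused original node observes all of them. Sketch of the resulting graph:

    //   lane3 = Load(base, index + 12, effect0, ctrl)
    //   lane2 = Load(base, index + 8,  lane3,   ctrl)
    //   lane1 = Load(base, index + 4,  lane2,   ctrl)
    //   lane0 = Load(base, index + 0,  lane1,   ctrl)  // reuses `node`
    // Later effect uses of the original node therefore see all four loads.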
+void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
+ const Operator* store_op,
+ SimdType rep_type) {
+ if (rep == MachineRepresentation::kSimd128) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* indices[kMaxLanes];
+ GetIndexNodes(index, indices);
+ DCHECK(node->InputCount() > 2);
+ Node* value = node->InputAt(2);
+ DCHECK(HasReplacement(1, value));
+ Node* rep_nodes[kMaxLanes];
+ rep_nodes[0] = node;
+ Node** rep_inputs = GetReplacementsWithType(value, rep_type);
+ rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+ NodeProperties::ChangeOp(node, store_op);
+ if (node->InputCount() > 3) {
+ DCHECK(node->InputCount() > 4);
+ Node* effect_input = node->InputAt(3);
+ Node* control_input = node->InputAt(4);
+ rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
+ effect_input, control_input);
+ rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
+ rep_nodes[3], control_input);
+ rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
+ rep_nodes[2], control_input);
+ rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
+
+ } else {
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ rep_nodes[i] =
+ graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
+ }
+ }
+
+ ReplaceNode(node, rep_nodes);
+ } else {
+ DefaultLowering(node);
+ }
+}
+
+void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
+ const Operator* op) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ ReplaceNode(node, rep_node);
+}
+
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
switch (node->opcode()) {
@@ -159,13 +258,13 @@ void SimdScalarLowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
Node* new_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
new_node[i] = nullptr;
}
new_node[0] = node;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kSimd128) {
- for (int i = 1; i < kMaxLanes; i++) {
+ for (int i = 1; i < kMaxLanes; ++i) {
new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
graph()->start());
}
@@ -175,6 +274,57 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kLoad: {
+ MachineRepresentation rep =
+ LoadRepresentationOf(node->op()).representation();
+ const Operator* load_op;
+ if (rep_type == SimdType::kInt32) {
+ load_op = machine()->Load(MachineType::Int32());
+ } else if (rep_type == SimdType::kFloat32) {
+ load_op = machine()->Load(MachineType::Float32());
+ }
+ LowerLoadOp(rep, node, load_op);
+ break;
+ }
+ case IrOpcode::kUnalignedLoad: {
+ MachineRepresentation rep =
+ UnalignedLoadRepresentationOf(node->op()).representation();
+ const Operator* load_op;
+ if (rep_type == SimdType::kInt32) {
+ load_op = machine()->UnalignedLoad(MachineType::Int32());
+ } else if (rep_type == SimdType::kFloat32) {
+ load_op = machine()->UnalignedLoad(MachineType::Float32());
+ }
+ LowerLoadOp(rep, node, load_op);
+ break;
+ }
+ case IrOpcode::kStore: {
+ MachineRepresentation rep =
+ StoreRepresentationOf(node->op()).representation();
+ WriteBarrierKind write_barrier_kind =
+ StoreRepresentationOf(node->op()).write_barrier_kind();
+ const Operator* store_op;
+ if (rep_type == SimdType::kInt32) {
+ store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, write_barrier_kind));
+ } else {
+ store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kFloat32, write_barrier_kind));
+ }
+ LowerStoreOp(rep, node, store_op, rep_type);
+ break;
+ }
+ case IrOpcode::kUnalignedStore: {
+ MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+ const Operator* store_op;
+ if (rep_type == SimdType::kInt32) {
+ store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+ } else {
+ store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+ }
+ LowerStoreOp(rep, node, store_op, rep_type);
+ break;
+ }
case IrOpcode::kReturn: {
DefaultLowering(node);
int new_return_count = GetReturnCountAfterLowering(signature());
@@ -200,7 +350,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
rep_node[i] =
graph()->NewNode(common()->Projection(i), node, graph()->start());
}
@@ -214,7 +364,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
// The replacement nodes have already been created, we only have to
// replace placeholder nodes.
Node** rep_node = GetReplacements(node);
- for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node** rep_input =
GetReplacementsWithType(node->InputAt(i), rep_type);
for (int j = 0; j < kMaxLanes; j++) {
@@ -226,75 +376,51 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
-
case IrOpcode::kInt32x4Add: {
- DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- rep_node[i] =
- graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
- }
- ReplaceNode(node, rep_node);
- break;
- }
-
- case IrOpcode::kCreateInt32x4: {
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- DCHECK(!HasReplacement(1, node->InputAt(i)));
- rep_node[i] = node->InputAt(i);
- }
- ReplaceNode(node, rep_node);
- break;
- }
-
- case IrOpcode::kInt32x4ExtractLane: {
- Node* laneNode = node->InputAt(1);
- DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
- int32_t lane = OpParameter<int32_t>(laneNode);
- Node* rep_node[kMaxLanes] = {
- GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
- nullptr, nullptr};
- ReplaceNode(node, rep_node);
+ LowerBinaryOp(node, rep_type, machine()->Int32Add());
break;
}
-
case IrOpcode::kFloat32x4Add: {
- DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
- rep_right[i]);
- }
- ReplaceNode(node, rep_node);
+ LowerBinaryOp(node, rep_type, machine()->Float32Add());
break;
}
-
+ case IrOpcode::kCreateInt32x4:
case IrOpcode::kCreateFloat32x4: {
Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- DCHECK(!HasReplacement(1, node->InputAt(i)));
- rep_node[i] = node->InputAt(i);
+ for (int i = 0; i < kMaxLanes; ++i) {
+ if (HasReplacement(0, node->InputAt(i))) {
+ rep_node[i] = GetReplacements(node->InputAt(i))[0];
+ } else {
+ rep_node[i] = node->InputAt(i);
+ }
}
ReplaceNode(node, rep_node);
break;
}
-
+ case IrOpcode::kInt32x4ExtractLane:
case IrOpcode::kFloat32x4ExtractLane: {
- Node* laneNode = node->InputAt(1);
- DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
- int32_t lane = OpParameter<int32_t>(laneNode);
+ int32_t lane = OpParameter<int32_t>(node);
Node* rep_node[kMaxLanes] = {
GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
nullptr, nullptr};
ReplaceNode(node, rep_node);
break;
}
-
+ case IrOpcode::kInt32x4ReplaceLane:
+ case IrOpcode::kFloat32x4ReplaceLane: {
+ DCHECK_EQ(2, node->InputCount());
+ Node* repNode = node->InputAt(1);
+ int32_t lane = OpParameter<int32_t>(node);
+ DCHECK(lane >= 0 && lane <= 3);
+ Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ if (HasReplacement(0, repNode)) {
+ rep_node[lane] = GetReplacements(repNode)[0];
+ } else {
+ rep_node[lane] = repNode;
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
default: { DefaultLowering(node); }
}
}
@@ -322,7 +448,7 @@ void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
DCHECK(new_node[0] != nullptr ||
(new_node[1] == nullptr && new_node[2] == nullptr &&
new_node[3] == nullptr));
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
replacements_[old->id()].node[i] = new_node[i];
}
}
@@ -348,7 +474,7 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
}
Node** result = zone()->NewArray<Node*>(kMaxLanes);
if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
replacements[i]);
@@ -357,7 +483,7 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
}
}
} else {
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
replacements[i]);
@@ -379,17 +505,17 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
int value_count = phi->op()->ValueInputCount();
SimdType type = ReplacementType(phi);
Node** inputs_rep[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
}
- for (int i = 0; i < value_count; i++) {
+ for (int i = 0; i < value_count; ++i) {
for (int j = 0; j < kMaxLanes; j++) {
inputs_rep[j][i] = placeholder_;
}
}
Node* rep_nodes[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (type == SimdType::kInt32) {
rep_nodes[i] = graph()->NewNode(
common()->Phi(MachineRepresentation::kWord32, value_count),
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 39449f4b9f..c795c6b88b 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -31,6 +31,7 @@ class SimdScalarLowering {
enum class SimdType : uint8_t { kInt32, kFloat32 };
static const int kMaxLanes = 4;
+ static const int kLaneWidth = 16 / kMaxLanes;
struct Replacement {
Node* node[kMaxLanes];
@@ -53,6 +54,12 @@ class SimdScalarLowering {
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
+ void GetIndexNodes(Node* index, Node** new_indices);
+ void LowerLoadOp(MachineRepresentation rep, Node* node,
+ const Operator* load_op);
+ void LowerStoreOp(MachineRepresentation rep, Node* node,
+ const Operator* store_op, SimdType rep_type);
+ void LowerBinaryOp(Node* node, SimdType rep_type, const Operator* op);
struct NodeState {
Node* node;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index c90d7437bf..c9fda35b36 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -209,8 +209,30 @@ class InputUseInfos {
#endif // DEBUG
-} // namespace
+bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
+ Zone* type_zone) {
+ // We assume the inputs are checked Signed32 (or known statically
+ // to be Signed32). Technically, the inputs could also be minus zero, but
+ // that cannot cause overflow.
+ left = Type::Intersect(left, Type::Signed32(), type_zone);
+ right = Type::Intersect(right, Type::Signed32(), type_zone);
+ if (!left->IsInhabited() || !right->IsInhabited()) return false;
+ switch (op->opcode()) {
+ case IrOpcode::kSpeculativeNumberAdd:
+ return (left->Max() + right->Max() > kMaxInt) ||
+ (left->Min() + right->Min() < kMinInt);
+
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return (left->Max() - right->Min() > kMaxInt) ||
+ (left->Min() - right->Max() < kMinInt);
+
+ default:
+ UNREACHABLE();
+ }
+ return true;
+}
+} // namespace
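CanOverflowSigned32 is plain interval arithmetic over the Signed32-intersected input types; two worked cases with assumed types:

    //   left: [0, kMaxInt], right: [1, 1]
    //     add: kMaxInt + 1 > kMaxInt             -> may overflow
    //   left: [0, 1000],    right: [0, 1000]
    //     add: 1000 + 1000 <= kMaxInt, 0 + 0 >= kMinInt
    //                                            -> cannot overflow
    // In the second case the lowering below swaps the overflow-checked
    // operator for the pure Int32Add.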
class RepresentationSelector {
public:
@@ -675,6 +697,11 @@ class RepresentationSelector {
GetUpperBound(node->InputAt(1))->Is(type);
}
+ bool IsNodeRepresentationTagged(Node* node) {
+ MachineRepresentation representation = GetInfo(node)->representation();
+ return IsAnyTagged(representation);
+ }
+
bool OneInputCannotBe(Node* node, Type* type) {
DCHECK_EQ(2, node->op()->ValueInputCount());
return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
@@ -988,8 +1015,9 @@ class RepresentationSelector {
machine_type.semantic() == MachineSemantic::kUint32);
(*types)[i] = machine_type;
}
- NodeProperties::ChangeOp(node,
- jsgraph_->common()->TypedStateValues(types));
+ SparseInputMask mask = SparseInputMaskOf(node->op());
+ NodeProperties::ChangeOp(
+ node, jsgraph_->common()->TypedStateValues(types, mask));
}
SetOutput(node, MachineRepresentation::kTagged);
}
@@ -1002,9 +1030,14 @@ class RepresentationSelector {
// TODO(turbofan): Special treatment for ExternalPointer here,
// to avoid incompatible truncations. We really need a story
// for the JSFunction::entry field.
- UseInfo use_info = input_type->Is(Type::ExternalPointer())
- ? UseInfo::PointerInt()
- : UseInfo::Any();
+ UseInfo use_info = UseInfo::None();
+ if (input_type->IsInhabited()) {
+ if (input_type->Is(Type::ExternalPointer())) {
+ use_info = UseInfo::PointerInt();
+ } else {
+ use_info = UseInfo::Any();
+ }
+ }
EnqueueInput(node, i, use_info);
}
} else if (lower()) {
@@ -1019,7 +1052,9 @@ class RepresentationSelector {
// TODO(turbofan): Special treatment for ExternalPointer here,
// to avoid incompatible truncations. We really need a story
// for the JSFunction::entry field.
- if (input_type->Is(Type::ExternalPointer())) {
+ if (!input_type->IsInhabited()) {
+ (*types)[i] = MachineType::None();
+ } else if (input_type->Is(Type::ExternalPointer())) {
(*types)[i] = MachineType::Pointer();
} else {
MachineRepresentation rep = input_type->IsInhabited()
@@ -1080,17 +1115,14 @@ class RepresentationSelector {
return kNoWriteBarrier;
}
if (value_type->IsHeapConstant()) {
- Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
- RootIndexMap root_index_map(jsgraph_->isolate());
- int root_index = root_index_map.Lookup(*value_object);
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
- // Write barriers are unnecessary for immortal immovable roots.
- return kNoWriteBarrier;
- }
- if (value_object->IsMap()) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
+ Heap::RootListIndex root_index;
+ Heap* heap = jsgraph_->isolate()->heap();
+ if (heap->IsRootHandle(value_type->AsHeapConstant()->Value(),
+ &root_index)) {
+ if (heap->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
}
}
if (field_representation == MachineRepresentation::kTaggedPointer ||
@@ -1164,6 +1196,7 @@ class RepresentationSelector {
if (BothInputsAre(node, Type::PlainPrimitive())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
+
if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node)->Is(Type::Signed32()) ||
GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1177,33 +1210,38 @@ class RepresentationSelector {
// Try to use type feedback.
NumberOperationHint hint = NumberOperationHintOf(node->op());
- // Handle the case when no int32 checks on inputs are necessary
- // (but an overflow check is needed on the output).
- if (BothInputsAre(node, Type::Signed32()) ||
- (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
- NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
- // If both the inputs the feedback are int32, use the overflow op.
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ Type* left_feedback_type = TypeOf(node->InputAt(0));
+ Type* right_feedback_type = TypeOf(node->InputAt(1));
+ // Handle the case when no int32 checks on inputs are necessary (but
+ // an overflow check is needed on the output).
+ // TODO(jarin) We should not look at the upper bound because the typer
+ // could have already baked in some feedback into the upper bound.
+ if (BothInputsAre(node, Type::Signed32()) ||
+ (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
+ GetUpperBound(node)->Is(type_cache_.kSafeInteger))) {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
- return;
+ } else {
+ UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+ // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+ // a minus zero check for the right hand side, since we already
+ // know that the left hand side is a proper Signed32 value,
+ // potentially guarded by a check.
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+ hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+ VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+ Type::Signed32());
+ }
+ if (lower()) {
+ if (CanOverflowSigned32(node->op(), left_feedback_type,
+ right_feedback_type, graph_zone())) {
+ ChangeToInt32OverflowOp(node);
+ } else {
+ ChangeToPureOp(node, Int32Op(node));
+ }
}
- }
-
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
- // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
- // a minus zero check for the right hand side, since we already
- // know that the left hand side is a proper Signed32 value,
- // potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(
- hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
- VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
return;
}
@@ -1550,13 +1588,38 @@ class RepresentationSelector {
NumberOperationHint hint = NumberOperationHintOf(node->op());
switch (hint) {
case NumberOperationHint::kSignedSmall:
- case NumberOperationHint::kSigned32:
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kBit);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ case NumberOperationHint::kSigned32: {
+ if (propagate()) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kBit);
+ } else if (retype()) {
+ SetOutput(node, MachineRepresentation::kBit, Type::Any());
+ } else {
+ DCHECK(lower());
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+ if (IsNodeRepresentationTagged(lhs) &&
+ IsNodeRepresentationTagged(rhs)) {
+ VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+ MachineRepresentation::kBit);
+ ChangeToPureOp(
+ node, changer_->TaggedSignedOperatorFor(node->opcode()));
+
+ } else {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kBit);
+ ChangeToPureOp(node, Int32Op(node));
+ }
+ }
return;
- case NumberOperationHint::kNumber:
+ }
case NumberOperationHint::kNumberOrOddball:
+ // Abstract and strict equality don't perform ToNumber conversions
+ // on Oddballs, so make sure we don't accidentally sneak in a
+ // hint with Oddball feedback here.
+ DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
+ // Fallthrough
+ case NumberOperationHint::kNumber:
VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -2156,9 +2219,15 @@ class RepresentationSelector {
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kStringCharAt: {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringCharCodeAt: {
+ // TODO(turbofan): Allow builtins to return untagged values.
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kStringFromCharCode: {
@@ -2207,6 +2276,17 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kNone);
return;
}
+ case IrOpcode::kCheckInternalizedString: {
+ if (InputIs(node, Type::InternalizedString())) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
case IrOpcode::kCheckNumber: {
if (InputIs(node, Type::Number())) {
if (truncation.IsUsedAsWord32()) {
@@ -2449,6 +2529,12 @@ class RepresentationSelector {
VisitObjectIs(node, Type::Undetectable(), lowering);
return;
}
+ case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kNewUnmappedArgumentsElements: {
+ ProcessRemainingInputs(node, 0);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kArrayBufferWasNeutered: {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
@@ -2466,8 +2552,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckTaggedHole: {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
return;
}
case IrOpcode::kConvertTaggedHoleToUndefined: {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index b8a486df38..dcfb485156 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -129,6 +129,15 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kCheckedFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue() && IsInt32Double(m.Value())) {
+ Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ break;
+ }
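The new reduction folds the checked conversion only when the constant is exactly an int32 (integral, in range, and presumably not minus zero, which IsInt32Double is assumed to rule out); examples:

    //   CheckedFloat64ToInt32(3.0)   -> Int32Constant(3)
    //   CheckedFloat64ToInt32(3.5)   -> kept (would deopt at runtime)
    //   CheckedFloat64ToInt32(-0.0)  -> kept (minus-zero sensitive)
    //   CheckedFloat64ToInt32(2^31)  -> kept (out of int32 range)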
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedTaggedSignedToInt32: {
NodeMatcher m(node->InputAt(0));
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 345a2c5f88..31dac61d7e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -92,6 +92,7 @@ bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+ lhs.map.address() == rhs.map.address() &&
lhs.machine_type == rhs.machine_type;
}
@@ -118,6 +119,10 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
name->Print(os);
os << ", ";
}
+ Handle<Map> map;
+ if (access.map.ToHandle(&map)) {
+ os << Brief(*map) << ", ";
+ }
#endif
access.type->PrintTo(os);
os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
@@ -229,6 +234,44 @@ std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
return os;
}
+std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
+ bool empty = true;
+ if (flags & CheckMapsFlag::kTryMigrateInstance) {
+ os << "TryMigrateInstance";
+ empty = false;
+ }
+ if (empty) os << "None";
+ return os;
+}
+
+bool operator==(CheckMapsParameters const& lhs,
+ CheckMapsParameters const& rhs) {
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+}
+
+bool operator!=(CheckMapsParameters const& lhs,
+ CheckMapsParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CheckMapsParameters const& p) {
+ return base::hash_combine(p.flags(), p.maps());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
+ ZoneHandleSet<Map> const& maps = p.maps();
+ os << p.flags();
+ for (size_t i = 0; i < maps.size(); ++i) {
+ os << ", " << Brief(*maps[i]);
+ }
+ return os;
+}
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kCheckMaps, op->opcode());
+ return OpParameter<CheckMapsParameters>(op);
+}
+
size_t hash_value(CheckTaggedInputMode mode) {
return static_cast<size_t>(mode);
}
@@ -274,22 +317,36 @@ GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator* op) {
return OpParameter<GrowFastElementsFlags>(op);
}
+bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+ return lhs.mode() == rhs.mode() &&
+ lhs.source().address() == rhs.source().address() &&
+ lhs.target().address() == rhs.target().address();
+}
+
+bool operator!=(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+ return !(lhs == rhs);
+}
+
size_t hash_value(ElementsTransition transition) {
- return static_cast<uint8_t>(transition);
+ return base::hash_combine(static_cast<uint8_t>(transition.mode()),
+ transition.source().address(),
+ transition.target().address());
}
std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
- switch (transition) {
+ switch (transition.mode()) {
case ElementsTransition::kFastTransition:
- return os << "fast-transition";
+ return os << "fast-transition from " << Brief(*transition.source())
+ << " to " << Brief(*transition.target());
case ElementsTransition::kSlowTransition:
- return os << "slow-transition";
+ return os << "slow-transition from " << Brief(*transition.source())
+ << " to " << Brief(*transition.target());
}
UNREACHABLE();
return os;
}
-ElementsTransition ElementsTransitionOf(const Operator* op) {
+ElementsTransition const& ElementsTransitionOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
return OpParameter<ElementsTransition>(op);
}
@@ -331,6 +388,12 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
+int ParameterCountOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kNewUnmappedArgumentsElements ||
+ op->opcode() == IrOpcode::kNewRestParameterElements);
+ return OpParameter<int>(op);
+}
+
PretenureFlag PretenureFlagOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
return OpParameter<PretenureFlag>(op);
@@ -395,6 +458,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
@@ -436,6 +500,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(CheckBounds, 2, 1) \
V(CheckHeapObject, 1, 1) \
V(CheckIf, 1, 0) \
+ V(CheckInternalizedString, 1, 1) \
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
@@ -689,16 +754,15 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
return nullptr;
}
-const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
- // TODO(bmeurer): Cache the most important versions of this operator.
- DCHECK_LT(0, map_input_count);
- int const value_input_count = 1 + map_input_count;
- return new (zone()) Operator1<int>( // --
- IrOpcode::kCheckMaps, // opcode
- Operator::kNoThrow | Operator::kNoWrite, // flags
- "CheckMaps", // name
- value_input_count, 1, 1, 0, 1, 0, // counts
- map_input_count); // parameter
+const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
+ ZoneHandleSet<Map> maps) {
+ CheckMapsParameters const parameters(flags, maps);
+ return new (zone()) Operator1<CheckMapsParameters>( // --
+ IrOpcode::kCheckMaps, // opcode
+ Operator::kNoThrow | Operator::kNoWrite, // flags
+ "CheckMaps", // name
+ 1, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
}
const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
@@ -733,10 +797,30 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
IrOpcode::kTransitionElementsKind, // opcode
Operator::kNoDeopt | Operator::kNoThrow, // flags
"TransitionElementsKind", // name
- 3, 1, 1, 0, 1, 0, // counts
+ 1, 1, 1, 0, 1, 0, // counts
transition); // parameter
}
+const Operator* SimplifiedOperatorBuilder::NewUnmappedArgumentsElements(
+ int parameter_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kNewUnmappedArgumentsElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewUnmappedArgumentsElements", // name
+ 0, 1, 0, 1, 1, 0, // counts
+ parameter_count); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewRestParameterElements(
+ int parameter_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kNewRestParameterElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewRestParameterElements", // name
+ 0, 1, 0, 1, 1, 0, // counts
+ parameter_count); // parameter
+}
+
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
switch (pretenure) {
case NOT_TENURED:
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 833a0554f5..4ad44354f8 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -64,6 +65,7 @@ struct FieldAccess {
BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
int offset; // offset of the field, without tag.
MaybeHandle<Name> name; // debugging only.
+ MaybeHandle<Map> map; // map of the field value (if known).
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
@@ -143,6 +145,41 @@ std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+// Flags for map checks.
+enum class CheckMapsFlag : uint8_t {
+ kNone = 0u,
+ kTryMigrateInstance = 1u << 0, // Try instance migration.
+};
+typedef base::Flags<CheckMapsFlag> CheckMapsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
+
+std::ostream& operator<<(std::ostream&, CheckMapsFlags);
+
+// A descriptor for map checks.
+class CheckMapsParameters final {
+ public:
+ CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
+ : flags_(flags), maps_(maps) {}
+
+ CheckMapsFlags flags() const { return flags_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
+
+ private:
+ CheckMapsFlags const flags_;
+ ZoneHandleSet<Map> const maps_;
+};
+
+bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
+bool operator!=(CheckMapsParameters const&, CheckMapsParameters const&);
+
+size_t hash_value(CheckMapsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
+ WARN_UNUSED_RESULT;
+
// A descriptor for growing elements backing stores.
enum class GrowFastElementsFlag : uint8_t {
kNone = 0u,
@@ -160,16 +197,35 @@ GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator*)
WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
-enum class ElementsTransition : uint8_t {
- kFastTransition, // simple transition, just updating the map.
- kSlowTransition // full transition, round-trip to the runtime.
+class ElementsTransition final {
+ public:
+ enum Mode : uint8_t {
+ kFastTransition, // simple transition, just updating the map.
+ kSlowTransition // full transition, round-trip to the runtime.
+ };
+
+ ElementsTransition(Mode mode, Handle<Map> source, Handle<Map> target)
+ : mode_(mode), source_(source), target_(target) {}
+
+ Mode mode() const { return mode_; }
+ Handle<Map> source() const { return source_; }
+ Handle<Map> target() const { return target_; }
+
+ private:
+ Mode const mode_;
+ Handle<Map> const source_;
+ Handle<Map> const target_;
};
+bool operator==(ElementsTransition const&, ElementsTransition const&);
+bool operator!=(ElementsTransition const&, ElementsTransition const&);
+
size_t hash_value(ElementsTransition);
std::ostream& operator<<(std::ostream&, ElementsTransition);
-ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementsTransition const& ElementsTransitionOf(const Operator* op)
+ WARN_UNUSED_RESULT;
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
@@ -186,6 +242,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
+int ParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+
PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
@@ -294,6 +352,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
+ const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
@@ -319,9 +378,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckIf();
const Operator* CheckBounds();
- const Operator* CheckMaps(int map_input_count);
+ const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
const Operator* CheckHeapObject();
+ const Operator* CheckInternalizedString();
const Operator* CheckNumber();
const Operator* CheckSmi();
const Operator* CheckString();
@@ -355,6 +415,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
+ // new-rest-parameter-elements
+ const Operator* NewRestParameterElements(int parameter_count);
+
+ // new-unmapped-arguments-elements
+ const Operator* NewUnmappedArgumentsElements(int parameter_count);
+
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index e8310d7d56..61c71caf87 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -4,6 +4,8 @@
#include "src/compiler/state-values-utils.h"
+#include "src/bit-vector.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -47,6 +49,16 @@ bool StateValuesCache::IsKeysEqualToNode(StateValuesKey* key, Node* node) {
if (key->count != static_cast<size_t>(node->InputCount())) {
return false;
}
+
+ DCHECK(node->opcode() == IrOpcode::kStateValues);
+ SparseInputMask node_mask = SparseInputMaskOf(node->op());
+
+ if (node_mask != key->mask) {
+ return false;
+ }
+
+ // Comparing real inputs rather than sparse inputs, since we already know the
+ // sparse input masks are the same.
for (size_t i = 0; i < key->count; i++) {
if (key->values[i] != node->InputAt(static_cast<int>(i))) {
return false;
@@ -62,6 +74,9 @@ bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
if (key1->count != key2->count) {
return false;
}
+ if (key1->mask != key2->mask) {
+ return false;
+ }
for (size_t i = 0; i < key1->count; i++) {
if (key1->values[i] != key2->values[i]) {
return false;
@@ -73,19 +88,18 @@ bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
Node* StateValuesCache::GetEmptyStateValues() {
if (empty_state_values_ == nullptr) {
- empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+ empty_state_values_ =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
}
return empty_state_values_;
}
-
-NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
- while (working_space_.size() <= level) {
- void* space = zone()->New(sizeof(NodeVector));
- working_space_.push_back(new (space)
- NodeVector(kMaxInputCount, nullptr, zone()));
+StateValuesCache::WorkingBuffer* StateValuesCache::GetWorkingSpace(
+ size_t level) {
+ if (working_space_.size() <= level) {
+ working_space_.resize(level + 1);
}
- return working_space_[level];
+ return &working_space_[level];
}
namespace {
@@ -93,24 +107,24 @@ namespace {
int StateValuesHashKey(Node** nodes, size_t count) {
size_t hash = count;
for (size_t i = 0; i < count; i++) {
- hash = hash * 23 + nodes[i]->id();
+ hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
}
return static_cast<int>(hash & 0x7fffffff);
}
} // namespace
-
-Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
- StateValuesKey key(count, nodes);
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
+ SparseInputMask mask) {
+ StateValuesKey key(count, mask, nodes);
int hash = StateValuesHashKey(nodes, count);
ZoneHashMap::Entry* lookup =
hash_map_.LookupOrInsert(&key, hash, ZoneAllocationPolicy(zone()));
DCHECK_NOT_NULL(lookup);
Node* node;
if (lookup->value == nullptr) {
- int input_count = static_cast<int>(count);
- node = graph()->NewNode(common()->StateValues(input_count), input_count,
+ int node_count = static_cast<int>(count);
+ node = graph()->NewNode(common()->StateValues(node_count, mask), node_count,
nodes);
NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
lookup->key = new_key;
@@ -121,106 +135,190 @@ Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
return node;
}
+SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
+ WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
+ Node** values, size_t count, const BitVector* liveness) {
+ SparseInputMask::BitMaskType input_mask = 0;
-class StateValuesCache::ValueArrayIterator {
- public:
- ValueArrayIterator(Node** values, size_t count)
- : values_(values), count_(count), current_(0) {}
+ // Virtual nodes are the live nodes plus the implicit optimized out nodes,
+ // which are implied by the liveness mask.
+ size_t virtual_node_count = *node_count;
- void Advance() {
- if (!done()) {
- current_++;
- }
- }
+ while (*values_idx < count && *node_count < kMaxInputCount &&
+ virtual_node_count < SparseInputMask::kMaxSparseInputs) {
+ DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
- bool done() { return current_ >= count_; }
+ if (liveness == nullptr ||
+ liveness->Contains(static_cast<int>(*values_idx))) {
+ input_mask |= 1 << (virtual_node_count);
+ (*node_buffer)[(*node_count)++] = values[*values_idx];
+ }
+ virtual_node_count++;
- Node* node() {
- DCHECK(!done());
- return values_[current_];
+ (*values_idx)++;
}
- private:
- Node** values_;
- size_t count_;
- size_t current_;
-};
+ DCHECK(*node_count <= StateValuesCache::kMaxInputCount);
+ DCHECK(virtual_node_count <= SparseInputMask::kMaxSparseInputs);
+ // Add the end marker at the end of the mask.
+ input_mask |= SparseInputMask::kEndMarker << virtual_node_count;
-Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
- if (max_height == 0) {
- Node* node = it->node();
- it->Advance();
- return node;
- }
- DCHECK(!it->done());
+ return input_mask;
+}
- NodeVector* buffer = GetWorkingSpace(max_height);
- size_t count = 0;
- for (; count < kMaxInputCount; count++) {
- if (it->done()) break;
- (*buffer)[count] = BuildTree(it, max_height - 1);
+Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
+ size_t count, const BitVector* liveness,
+ size_t level) {
+ WorkingBuffer* node_buffer = GetWorkingSpace(level);
+ size_t node_count = 0;
+ SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
+
+ if (level == 0) {
+ input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+ values, count, liveness);
+ // Make sure we returned a sparse input mask.
+ DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+ } else {
+ while (*values_idx < count && node_count < kMaxInputCount) {
+ if (count - *values_idx < kMaxInputCount - node_count) {
+ // If we have fewer values remaining than inputs remaining, dump the
+ // remaining values into this node.
+ // TODO(leszeks): We could optimise this further by only counting
+ // remaining live nodes.
+
+ size_t previous_input_count = node_count;
+ input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+ values, count, liveness);
+ // Make sure we have exhausted our values.
+ DCHECK_EQ(*values_idx, count);
+ // Make sure we returned a sparse input mask.
+ DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+
+ // Make sure we haven't touched inputs below previous_input_count in the
+ // mask.
+ DCHECK_EQ(input_mask & ((1 << previous_input_count) - 1), 0u);
+ // Mark all previous inputs as live.
+ input_mask |= ((1 << previous_input_count) - 1);
+
+ break;
+
+ } else {
+ // Otherwise, add the values to a subtree and add that as an input.
+ Node* subtree =
+ BuildTree(values_idx, values, count, liveness, level - 1);
+ (*node_buffer)[node_count++] = subtree;
+ // Don't touch the bitmask, so that it stays dense.
+ }
+ }
}
- if (count == 1) {
- return (*buffer)[0];
+
+ if (node_count == 1 && input_mask == SparseInputMask::kDenseBitMask) {
+ // Elide the StateValues node if there is only one, dense input. This will
+ // only happen if we built a single subtree (as nodes with values are always
+ // sparse), and so we can replace ourselves with it.
+ DCHECK_EQ((*node_buffer)[0]->opcode(), IrOpcode::kStateValues);
+ return (*node_buffer)[0];
} else {
- return GetValuesNodeFromCache(&(buffer->front()), count);
+ return GetValuesNodeFromCache(node_buffer->data(), node_count,
+ SparseInputMask(input_mask));
+ }
+}
+
+#if DEBUG
+namespace {
+
+void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
+ const BitVector* liveness) {
+ CHECK_EQ(count, StateValuesAccess(tree).size());
+
+ int i;
+ auto access = StateValuesAccess(tree);
+ auto it = access.begin();
+ auto itend = access.end();
+ for (i = 0; it != itend; ++it, ++i) {
+ if (liveness == nullptr || liveness->Contains(i)) {
+ CHECK((*it).node == values[i]);
+ } else {
+ CHECK((*it).node == nullptr);
+ }
}
+ CHECK_EQ(static_cast<size_t>(i), count);
}
+} // namespace
+#endif
-Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
+ const BitVector* liveness) {
#if DEBUG
+ // Check that the values represent actual values, and not a tree of values.
for (size_t i = 0; i < count; i++) {
- DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
- DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ if (values[i] != nullptr) {
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ }
+ }
+ if (liveness != nullptr) {
+ // Liveness can have extra bits for the stack or accumulator, which we
+ // ignore here.
+ DCHECK_LE(count, static_cast<size_t>(liveness->length()));
+
+ for (size_t i = 0; i < count; i++) {
+ if (liveness->Contains(static_cast<int>(i))) {
+ DCHECK_NOT_NULL(values[i]);
+ }
+ }
}
#endif
+
if (count == 0) {
return GetEmptyStateValues();
}
+
+ // This is a worst-case tree height estimate, assuming that all values are
+ // live. We could get a better estimate by counting zeroes in the liveness
+ // vector, but there's no point -- any excess height in the tree will be
+ // collapsed by the single-input elision at the end of BuildTree.
size_t height = 0;
- size_t max_nodes = 1;
- while (count > max_nodes) {
+ size_t max_inputs = kMaxInputCount;
+ while (count > max_inputs) {
height++;
- max_nodes *= kMaxInputCount;
+ max_inputs *= kMaxInputCount;
}
- ValueArrayIterator it(values, count);
+ size_t values_idx = 0;
+ Node* tree = BuildTree(&values_idx, values, count, liveness, height);
+ // The values should be exhausted by the end of BuildTree.
+ DCHECK_EQ(values_idx, count);
- Node* tree = BuildTree(&it, height);
+ // The 'tree' must be rooted with a StateValues node.
+ DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
- // If the 'tree' is a single node, equip it with a StateValues wrapper.
- if (tree->opcode() != IrOpcode::kStateValues &&
- tree->opcode() != IrOpcode::kTypedStateValues) {
- tree = GetValuesNodeFromCache(&tree, 1);
- }
+#if DEBUG
+ CheckTreeContainsValues(tree, values, count, liveness);
+#endif
return tree;
}
-
StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
- // A hacky way initialize - just set the index before the node we want
- // to process and then advance to it.
- stack_[current_depth_].node = node;
- stack_[current_depth_].index = -1;
- Advance();
+ stack_[current_depth_] =
+ SparseInputMaskOf(node->op()).IterateOverInputs(node);
+ EnsureValid();
}
-
-StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+SparseInputMask::InputIterator* StateValuesAccess::iterator::Top() {
DCHECK(current_depth_ >= 0);
DCHECK(current_depth_ < kMaxInlineDepth);
return &(stack_[current_depth_]);
}
-
void StateValuesAccess::iterator::Push(Node* node) {
current_depth_++;
CHECK(current_depth_ < kMaxInlineDepth);
- stack_[current_depth_].node = node;
- stack_[current_depth_].index = 0;
+ stack_[current_depth_] =
+ SparseInputMaskOf(node->op()).IterateOverInputs(node);
}
@@ -234,48 +332,61 @@ bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
void StateValuesAccess::iterator::Advance() {
- // Advance the current index.
- Top()->index++;
+ Top()->Advance();
+ EnsureValid();
+}
- // Fix up the position to point to a valid node.
+void StateValuesAccess::iterator::EnsureValid() {
while (true) {
- // TODO(jarin): Factor to a separate method.
- Node* node = Top()->node;
- int index = Top()->index;
+ SparseInputMask::InputIterator* top = Top();
+
+ if (top->IsEmpty()) {
+ // We are on a valid (albeit optimized out) node.
+ return;
+ }
- if (index >= node->InputCount()) {
- // Pop stack and move to the next sibling.
+ if (top->IsEnd()) {
+ // We have hit the end of this iterator. Pop the stack and move to the
+ // next sibling iterator.
Pop();
if (done()) {
// Stack is exhausted, we have reached the end.
return;
}
- Top()->index++;
- } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
- node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
- // Nested state, we need to push to the stack.
- Push(node->InputAt(index));
- } else {
- // We are on a valid node, we can stop the iteration.
- return;
+ Top()->Advance();
+ continue;
}
- }
-}
+ // At this point the value is known to be live and within our input nodes.
+ Node* value_node = top->GetReal();
+
+ if (value_node->opcode() == IrOpcode::kStateValues ||
+ value_node->opcode() == IrOpcode::kTypedStateValues) {
+ // Nested state, we need to push to the stack.
+ Push(value_node);
+ continue;
+ }
-Node* StateValuesAccess::iterator::node() {
- return Top()->node->InputAt(Top()->index);
+ // We are on a valid node, we can stop the iteration.
+ return;
+ }
}
+Node* StateValuesAccess::iterator::node() { return Top()->Get(nullptr); }
MachineType StateValuesAccess::iterator::type() {
- Node* state = Top()->node;
- if (state->opcode() == IrOpcode::kStateValues) {
+ Node* parent = Top()->parent();
+ if (parent->opcode() == IrOpcode::kStateValues) {
return MachineType::AnyTagged();
} else {
- DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
- ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
- return (*types)[Top()->index];
+ DCHECK_EQ(IrOpcode::kTypedStateValues, parent->opcode());
+
+ if (Top()->IsEmpty()) {
+ return MachineType::None();
+ } else {
+ ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
+ return (*types)[Top()->real_index()];
+ }
}
}
@@ -300,14 +411,24 @@ StateValuesAccess::TypedNode StateValuesAccess::iterator::operator*() {
size_t StateValuesAccess::size() {
size_t count = 0;
- for (int i = 0; i < node_->InputCount(); i++) {
- if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
- node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
- count += StateValuesAccess(node_->InputAt(i)).size();
- } else {
+ SparseInputMask mask = SparseInputMaskOf(node_->op());
+
+ SparseInputMask::InputIterator iterator = mask.IterateOverInputs(node_);
+
+ for (; !iterator.IsEnd(); iterator.Advance()) {
+ if (iterator.IsEmpty()) {
count++;
+ } else {
+ Node* value = iterator.GetReal();
+ if (value->opcode() == IrOpcode::kStateValues ||
+ value->opcode() == IrOpcode::kTypedStateValues) {
+ count += StateValuesAccess(value).size();
+ } else {
+ count++;
+ }
}
}
+
return count;
}
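
A reading aid for the sparse encoding introduced above: FillBufferWithValues gives each live value a 1 bit at its virtual position, leaves a 0 for every optimized-out value, and shifts an end marker in after the last virtual input. The following is a minimal standalone sketch of the same packing over plain arrays, assuming the end marker is the single bit 1 as in SparseInputMask; it is an illustration, not the V8 code.

#include <cstdint>
#include <cstdio>
#include <vector>

// Standalone model of the packing: store only live values, record liveness
// in a bitmask, and terminate the mask after the last virtual input.
using BitMask = uint32_t;
constexpr BitMask kEndMarker = 1;  // assumed to match SparseInputMask

BitMask PackSparse(const std::vector<int>& values,
                   const std::vector<bool>& live, std::vector<int>* packed) {
  BitMask mask = 0;
  size_t virtual_count = 0;
  for (size_t i = 0; i < values.size(); i++, virtual_count++) {
    if (live[i]) {
      mask |= BitMask{1} << virtual_count;  // live bit at the virtual position
      packed->push_back(values[i]);         // only live values occupy inputs
    }
  }
  mask |= kEndMarker << virtual_count;  // terminator after the last input
  return mask;
}

int main() {
  std::vector<int> packed;
  // Three values; the middle one is optimized out.
  BitMask mask = PackSparse({10, 20, 30}, {true, false, true}, &packed);
  std::printf("mask = 0x%x, packed = %zu\n", mask, packed.size());
  // Prints mask = 0xd (0b1101: live, hole, live, end marker), packed = 2.
}
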
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 14b1b9e599..d5e84d208c 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -5,12 +5,16 @@
#ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
#define V8_COMPILER_STATE_VALUES_UTILS_H_
+#include <array>
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
+class BitVector;
+
namespace compiler {
class Graph;
@@ -19,10 +23,12 @@ class V8_EXPORT_PRIVATE StateValuesCache {
public:
explicit StateValuesCache(JSGraph* js_graph);
- Node* GetNodeForValues(Node** values, size_t count);
+ Node* GetNodeForValues(Node** values, size_t count,
+ const BitVector* liveness = nullptr);
private:
static const size_t kMaxInputCount = 8;
+ typedef std::array<Node*, kMaxInputCount> WorkingBuffer;
struct NodeKey {
Node* node;
@@ -33,22 +39,34 @@ class V8_EXPORT_PRIVATE StateValuesCache {
struct StateValuesKey : public NodeKey {
// ValueArray - array of nodes ({node} has to be nullptr).
size_t count;
+ SparseInputMask mask;
Node** values;
- StateValuesKey(size_t count, Node** values)
- : NodeKey(nullptr), count(count), values(values) {}
+ StateValuesKey(size_t count, SparseInputMask mask, Node** values)
+ : NodeKey(nullptr), count(count), mask(mask), values(values) {}
};
- class ValueArrayIterator;
-
static bool AreKeysEqual(void* key1, void* key2);
static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
- Node* BuildTree(ValueArrayIterator* it, size_t max_height);
- NodeVector* GetWorkingSpace(size_t level);
+ // Fills {node_buffer}, starting from {node_count}, with {values}, starting
+ // at {values_idx}, sparsely encoding according to {liveness}. {node_count} is
+ // updated with the new number of inputs in {node_buffer}, and a bitmask of
+ // the sparse encoding is returned.
+ SparseInputMask::BitMaskType FillBufferWithValues(WorkingBuffer* node_buffer,
+ size_t* node_count,
+ size_t* values_idx,
+ Node** values, size_t count,
+ const BitVector* liveness);
+
+ Node* BuildTree(size_t* values_idx, Node** values, size_t count,
+ const BitVector* liveness, size_t level);
+
+ WorkingBuffer* GetWorkingSpace(size_t level);
Node* GetEmptyStateValues();
- Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+ Node* GetValuesNodeFromCache(Node** nodes, size_t count,
+ SparseInputMask mask);
Graph* graph() { return js_graph_->graph(); }
CommonOperatorBuilder* common() { return js_graph_->common(); }
@@ -57,7 +75,7 @@ class V8_EXPORT_PRIVATE StateValuesCache {
JSGraph* js_graph_;
CustomMatcherZoneHashMap hash_map_;
- ZoneVector<NodeVector*> working_space_; // One working space per level.
+ ZoneVector<WorkingBuffer> working_space_; // One working space per level.
Node* empty_state_values_;
};
@@ -86,21 +104,14 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
MachineType type();
bool done();
void Advance();
+ void EnsureValid();
- struct StatePos {
- Node* node;
- int index;
-
- explicit StatePos(Node* node) : node(node), index(0) {}
- StatePos() {}
- };
-
- StatePos* Top();
+ SparseInputMask::InputIterator* Top();
void Push(Node* node);
void Pop();
static const int kMaxInlineDepth = 8;
- StatePos stack_[kMaxInlineDepth];
+ SparseInputMask::InputIterator stack_[kMaxInlineDepth];
int current_depth_;
};
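
One detail of this header worth making concrete: GetNodeForValues (in the .cc diff above) sizes the tree as the smallest level at which a radix-kMaxInputCount tree holds all values, with kMaxInputCount fixed at 8 here. A standalone check of that arithmetic, mirroring the height loop:

#include <cstddef>
#include <cstdio>

// Mirrors the height loop in GetNodeForValues for kMaxInputCount == 8:
// level 0 holds up to 8 values, level 1 up to 64, level 2 up to 512, ...
size_t TreeHeight(size_t count) {
  const size_t kMaxInputCount = 8;
  size_t height = 0;
  size_t max_inputs = kMaxInputCount;
  while (count > max_inputs) {
    height++;
    max_inputs *= kMaxInputCount;
  }
  return height;
}

int main() {
  std::printf("%zu %zu %zu %zu\n", TreeHeight(8), TreeHeight(9),
              TreeHeight(64), TreeHeight(65));
  // Prints 0 1 1 2: eight values fit in one node, 9..64 need one extra
  // level, and so on. Any excess height is later removed by BuildTree's
  // single-input elision.
}
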
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 69eaf11616..3d9801bc10 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -64,6 +64,8 @@ class TypeCache final {
Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
Type* const kPositiveIntegerOrMinusZero =
Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+ Type* const kPositiveIntegerOrNaN =
+ Type::Union(kPositiveInteger, Type::NaN(), zone());
Type* const kPositiveIntegerOrMinusZeroOrNaN =
Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
@@ -97,6 +99,11 @@ class TypeCache final {
// [0, String::kMaxLength].
Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
+ // A time value always contains a tagged number in the range
+ // [-kMaxTimeInMs, kMaxTimeInMs].
+ Type* const kTimeValueType =
+ CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs);
+
// The JSDate::day property always contains a tagged number in the range
// [1, 31] or NaN.
Type* const kJSDateDayType =
@@ -123,9 +130,8 @@ class TypeCache final {
// The JSDate::value property always contains a tagged number in the range
// [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
- Type* const kJSDateValueType = Type::Union(
- CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
- Type::NaN(), zone());
+ Type* const kJSDateValueType =
+ Type::Union(kTimeValueType, Type::NaN(), zone());
// The JSDate::weekday property always contains a tagged number in the range
// [0, 6] or NaN.
@@ -137,6 +143,10 @@ class TypeCache final {
Type* const kJSDateYearType =
Type::Union(Type::SignedSmall(), Type::NaN(), zone());
+ // The valid number of arguments for JavaScript functions.
+ Type* const kArgumentsLengthType =
+ Type::Range(0.0, Code::kMaxArguments, zone());
+
private:
template <typename T>
Type* CreateRange() {
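
For reference, the bounds behind the new kTimeValueType come from the ECMAScript limit of 100,000,000 days on either side of the epoch; DateCache::kMaxTimeInMs is V8-internal, but the number itself is plain spec arithmetic, as this small check shows:

#include <cstdint>
#include <cstdio>

int main() {
  // ECMAScript time values span +/- 100,000,000 days around the epoch.
  const int64_t kMsPerDay = 24 * 60 * 60 * 1000;  // 86,400,000
  const int64_t kMaxTimeInMs = 100000000LL * kMsPerDay;
  std::printf("%lld\n", static_cast<long long>(kMaxTimeInMs));
  // Prints 8640000000000000 (8.64e15), the magnitude of both range bounds.
}
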
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
deleted file mode 100644
index da77a0c997..0000000000
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/type-hint-analyzer.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/ic/ic-state.h"
-#include "src/type-hints.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-BinaryOperationHint ToBinaryOperationHint(Token::Value op,
- BinaryOpICState::Kind kind) {
- switch (kind) {
- case BinaryOpICState::NONE:
- return BinaryOperationHint::kNone;
- case BinaryOpICState::SMI:
- return BinaryOperationHint::kSignedSmall;
- case BinaryOpICState::INT32:
- return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
- ? BinaryOperationHint::kNumberOrOddball
- : BinaryOperationHint::kSigned32;
- case BinaryOpICState::NUMBER:
- return BinaryOperationHint::kNumberOrOddball;
- case BinaryOpICState::STRING:
- return BinaryOperationHint::kString;
- case BinaryOpICState::GENERIC:
- return BinaryOperationHint::kAny;
- }
- UNREACHABLE();
- return BinaryOperationHint::kNone;
-}
-
-CompareOperationHint ToCompareOperationHint(Token::Value op,
- CompareICState::State state) {
- switch (state) {
- case CompareICState::UNINITIALIZED:
- return CompareOperationHint::kNone;
- case CompareICState::SMI:
- return CompareOperationHint::kSignedSmall;
- case CompareICState::NUMBER:
- return Token::IsOrderedRelationalCompareOp(op)
- ? CompareOperationHint::kNumberOrOddball
- : CompareOperationHint::kNumber;
- case CompareICState::STRING:
- case CompareICState::INTERNALIZED_STRING:
- case CompareICState::UNIQUE_NAME:
- case CompareICState::RECEIVER:
- case CompareICState::KNOWN_RECEIVER:
- case CompareICState::BOOLEAN:
- case CompareICState::GENERIC:
- return CompareOperationHint::kAny;
- }
- UNREACHABLE();
- return CompareOperationHint::kNone;
-}
-
-} // namespace
-
-bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
- BinaryOperationHint* hint) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
- *hint = ToBinaryOperationHint(state.op(), state.kind());
- return true;
-}
-
-bool TypeHintAnalysis::GetCompareOperationHint(
- TypeFeedbackId id, CompareOperationHint* hint) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::COMPARE_IC, code->kind());
- CompareICStub stub(code->stub_key(), code->GetIsolate());
- *hint = ToCompareOperationHint(stub.op(), stub.state());
- return true;
-}
-
-bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
- ToBooleanHints* hints) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
- ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
- *hints = stub.hints();
- return true;
-}
-
-TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
- DisallowHeapAllocation no_gc;
- TypeHintAnalysis::Infos infos(zone());
- Isolate* const isolate = code->GetIsolate();
- int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- switch (target->kind()) {
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC:
- case Code::TO_BOOLEAN_IC: {
- // Add this feedback to the {infos}.
- TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
- infos.insert(std::make_pair(id, handle(target, isolate)));
- break;
- }
- default:
- // Ignore the remaining code objects.
- break;
- }
- }
- return new (zone()) TypeHintAnalysis(infos, zone());
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/type-hint-analyzer.h b/deps/v8/src/compiler/type-hint-analyzer.h
deleted file mode 100644
index 354f8943bb..0000000000
--- a/deps/v8/src/compiler/type-hint-analyzer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
-#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
-
-#include "src/handles.h"
-#include "src/type-hints.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// The result of analyzing type hints.
-class TypeHintAnalysis final : public ZoneObject {
- public:
- typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
-
- explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
- : infos_(infos), zone_(zone) {}
-
- bool GetBinaryOperationHint(TypeFeedbackId id,
- BinaryOperationHint* hint) const;
- bool GetCompareOperationHint(TypeFeedbackId id,
- CompareOperationHint* hint) const;
- bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
-
- private:
- Zone* zone() const { return zone_; }
-
- Infos const infos_;
- Zone* zone_;
-};
-
-
-// The class that performs type hint analysis on the fullcodegen code object.
-class TypeHintAnalyzer final {
- public:
- explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
-
- TypeHintAnalysis* Analyze(Handle<Code> code);
-
- private:
- Zone* zone() const { return zone_; }
-
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 5ebc390c8b..8149a1bee4 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -83,10 +83,11 @@ Reduction TypedOptimization::Reduce(Node* node) {
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kNumberCeil:
- case IrOpcode::kNumberFloor:
case IrOpcode::kNumberRound:
case IrOpcode::kNumberTrunc:
return ReduceNumberRoundop(node);
+ case IrOpcode::kNumberFloor:
+ return ReduceNumberFloor(node);
case IrOpcode::kNumberToUint8Clamped:
return ReduceNumberToUint8Clamped(node);
case IrOpcode::kPhi:
@@ -185,6 +186,40 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Replace(input);
+ }
+ if (input_type->Is(Type::PlainNumber()) &&
+ input->opcode() == IrOpcode::kNumberDivide) {
+ Node* const lhs = NodeProperties::GetValueInput(input, 0);
+ Type* const lhs_type = NodeProperties::GetType(lhs);
+ Node* const rhs = NodeProperties::GetValueInput(input, 1);
+ Type* const rhs_type = NodeProperties::GetType(rhs);
+ if (lhs_type->Is(Type::Unsigned32()) && rhs_type->Is(Type::Unsigned32())) {
+ // We can replace
+ //
+ // NumberFloor(NumberDivide(lhs: unsigned32,
+ // rhs: unsigned32)): plain-number
+ //
+ // with
+ //
+ // NumberToUint32(NumberDivide(lhs, rhs))
+ //
+ // and just smash the type of the {lhs} on the {node},
+ // as the truncated result must be in the same range as
+ // {lhs} since {rhs} cannot be less than 1 (due to the
+ // plain-number type constraint on the {node}).
+ NodeProperties::ChangeOp(node, simplified()->NumberToUint32());
+ NodeProperties::SetType(node, lhs_type);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type* const input_type = NodeProperties::GetType(input);
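
The new ReduceNumberFloor rests on a numeric fact: for unsigned32 operands (with the divisor at least 1, guaranteed by the plain-number result type), flooring the quotient and truncating it to uint32 give the same value. A standalone illustration of that equivalence, separate from the compiler code:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // floor of a non-negative quotient equals truncation toward zero, which
  // is what NumberToUint32 performs on such a value.
  uint32_t lhs = 7, rhs = 2;
  double quotient = static_cast<double>(lhs) / rhs;      // 3.5
  double floored = std::floor(quotient);                 // 3
  uint32_t truncated = static_cast<uint32_t>(quotient);  // 3
  std::printf("%.1f %.0f %u\n", quotient, floored, truncated);
  // The truncated result also stays within lhs's range, which is why the
  // reduction can reuse lhs's type on the rewritten node.
}
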
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index fb2db7249d..810914993f 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -46,6 +46,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckString(Node* node);
Reduction ReduceLoadField(Node* node);
+ Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 2642a1007a..51b8352b31 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -122,6 +122,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(Deoptimize)
DECLARE_CASE(DeoptimizeIf)
DECLARE_CASE(DeoptimizeUnless)
+ DECLARE_CASE(TrapIf)
+ DECLARE_CASE(TrapUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -185,6 +187,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(Deoptimize)
DECLARE_CASE(DeoptimizeIf)
DECLARE_CASE(DeoptimizeUnless)
+ DECLARE_CASE(TrapIf)
+ DECLARE_CASE(TrapUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -1233,6 +1237,10 @@ Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
return Type::Boolean();
@@ -1240,12 +1248,21 @@ Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
+// JS instanceof operator.
+
+Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
+ return Type::Boolean();
+}
-Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+Type* Typer::Visitor::JSOrdinaryHasInstanceTyper(Type* lhs, Type* rhs,
+ Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
+ return Type::Callable();
+}
+
// JS context operators.
@@ -1296,6 +1313,10 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
return Type::Receiver();
}
+Type* Typer::Visitor::TypeJSCallConstructWithSpread(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
@@ -1344,6 +1365,8 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kMathClz32:
return t->cache_.kZeroToThirtyTwo;
// Date functions.
+ case kDateNow:
+ return t->cache_.kTimeValueType;
case kDateGetDate:
return t->cache_.kJSDateDayType;
case kDateGetDay:
@@ -1363,6 +1386,7 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kJSDateSecondType;
case kDateGetTime:
return t->cache_.kJSDateValueType;
+
// Number functions.
case kNumberIsFinite:
case kNumberIsInteger:
@@ -1375,16 +1399,41 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kIntegerOrMinusZeroOrNaN;
case kNumberToString:
return Type::String();
+
// String functions.
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
case kStringCharAt:
+ return Type::String();
+ case kStringCodePointAt:
+ return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
+ Type::Undefined(), t->zone());
case kStringConcat:
case kStringFromCharCode:
+ case kStringFromCodePoint:
+ return Type::String();
+ case kStringIndexOf:
+ case kStringLastIndexOf:
+ return Type::Range(-1.0, String::kMaxLength - 1.0, t->zone());
+ case kStringEndsWith:
+ case kStringIncludes:
+ return Type::Boolean();
+ case kStringRaw:
+ case kStringRepeat:
+ case kStringSlice:
+ return Type::String();
+ case kStringStartsWith:
+ return Type::Boolean();
case kStringSubstr:
+ case kStringSubstring:
case kStringToLowerCase:
+ case kStringToString:
case kStringToUpperCase:
+ case kStringTrim:
+ case kStringTrimLeft:
+ case kStringTrimRight:
+ case kStringValueOf:
return Type::String();
case kStringIterator:
@@ -1401,16 +1450,53 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::OtherObject();
// Array functions.
+ case kArrayConcat:
+ return Type::Receiver();
+ case kArrayEvery:
+ return Type::Boolean();
+ case kArrayFill:
+ case kArrayFilter:
+ return Type::Receiver();
+ case kArrayFindIndex:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayForEach:
+ return Type::Undefined();
+ case kArrayIncludes:
+ return Type::Boolean();
case kArrayIndexOf:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayJoin:
+ return Type::String();
case kArrayLastIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayMap:
+ return Type::Receiver();
case kArrayPush:
return t->cache_.kPositiveSafeInteger;
+ case kArrayReverse:
+ case kArraySlice:
+ return Type::Receiver();
+ case kArraySome:
+ return Type::Boolean();
+ case kArraySplice:
+ return Type::Receiver();
+ case kArrayUnshift:
+ return t->cache_.kPositiveSafeInteger;
// Object functions.
case kObjectHasOwnProperty:
return Type::Boolean();
+ // RegExp functions.
+ case kRegExpCompile:
+ return Type::OtherObject();
+ case kRegExpExec:
+ return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+ case kRegExpTest:
+ return Type::Boolean();
+ case kRegExpToString:
+ return Type::String();
+
// Function functions.
case kFunctionHasInstance:
return Type::Boolean();
@@ -1595,6 +1681,8 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
return Type::String();
}
+Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
@@ -1628,6 +1716,11 @@ Type* Typer::Visitor::TypeCheckIf(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::InternalizedString(), zone());
+}
+
Type* Typer::Visitor::TypeCheckMaps(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1752,6 +1845,14 @@ Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
}
+Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
+ return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewRestParameterElements(Node* node) {
+ return Type::OtherInternal();
+}
+
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
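
Several of the builtin result types added above are bounded by kMaxSafeInteger. For orientation, that constant is 2^53 - 1, the largest integer a double represents exactly; a quick standalone check:

#include <cmath>
#include <cstdio>

int main() {
  double max_safe_integer = std::pow(2.0, 53) - 1;  // 9007199254740991
  // Past this bound, consecutive integers collide in double precision.
  std::printf("%.0f %d\n", max_safe_integer,
              max_safe_integer + 1 == max_safe_integer + 2);
  // Prints 9007199254740991 1.
}
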
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 806bd8f2c5..a2af190d9d 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -7,6 +7,7 @@
#include "src/compiler/types.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -196,7 +197,17 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
- if (map->is_undetectable()) return kOtherUndetectable;
+ if (map->is_undetectable()) {
+ // Currently we assume that every undetectable receiver is also
+ // callable, which is what we need to support document.all. We
+ // could add another Type bit to support other use cases in the
+ // future if necessary.
+ DCHECK(map->is_callable());
+ return kOtherUndetectable;
+ }
+ if (map->is_callable()) {
+ return kOtherCallable;
+ }
return kOtherObject;
case JS_VALUE_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
@@ -204,7 +215,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -254,16 +264,21 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
+ DCHECK(!map->is_callable());
DCHECK(!map->is_undetectable());
return kOtherObject;
+ case JS_BOUND_FUNCTION_TYPE:
+ DCHECK(!map->is_undetectable());
+ return kBoundFunction;
case JS_FUNCTION_TYPE:
DCHECK(!map->is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
DCHECK(!map->is_undetectable());
- return kProxy;
+ if (map->is_callable()) return kCallableProxy;
+ return kOtherProxy;
case MAP_TYPE:
case ALLOCATION_SITE_TYPE:
case ACCESSOR_INFO_TYPE:
@@ -297,8 +312,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
- case SIGNATURE_INFO_TYPE:
- case TYPE_SWITCH_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
@@ -310,8 +323,10 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
+ case CONSTANT_ELEMENTS_PAIR_TYPE:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index e78357030e..b04f4e3c98 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -117,13 +117,16 @@ namespace compiler {
V(InternalizedString, 1u << 13) \
V(OtherString, 1u << 14) \
V(Simd, 1u << 15) \
+ V(OtherCallable, 1u << 16) \
V(OtherObject, 1u << 17) \
- V(OtherUndetectable, 1u << 16) \
- V(Proxy, 1u << 18) \
- V(Function, 1u << 19) \
- V(Hole, 1u << 20) \
- V(OtherInternal, 1u << 21) \
- V(ExternalPointer, 1u << 22) \
+ V(OtherUndetectable, 1u << 18) \
+ V(CallableProxy, 1u << 19) \
+ V(OtherProxy, 1u << 20) \
+ V(Function, 1u << 21) \
+ V(BoundFunction, 1u << 22) \
+ V(Hole, 1u << 23) \
+ V(OtherInternal, 1u << 24) \
+ V(ExternalPointer, 1u << 25) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
@@ -136,6 +139,7 @@ namespace compiler {
V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
V(Integral32, kSigned32 | kUnsigned32) \
+ V(Integral32OrMinusZeroOrNaN, kIntegral32 | kMinusZero | kNaN) \
V(PlainNumber, kIntegral32 | kOtherNumber) \
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
@@ -155,13 +159,22 @@ namespace compiler {
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
- V(Object, kFunction | kOtherObject | kOtherUndetectable) \
+ V(Proxy, kCallableProxy | kOtherProxy) \
+ V(Callable, kFunction | kBoundFunction | kOtherCallable | \
+ kCallableProxy | kOtherUndetectable) \
+ V(DetectableObject, kFunction | kBoundFunction | kOtherCallable | \
+ kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
+ V(Object, kDetectableObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
V(ReceiverOrUndefined, kReceiver | kUndefined) \
+ V(ReceiverOrNullOrUndefined, kReceiver | kNull | kUndefined) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
kReceiver) \
+ V(NonStringUniqueOrHole, kBoolean | kHole | kNull | kReceiver | \
+ kSymbol | kUndefined) \
V(Internal, kHole | kExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonNumber, kUnique | kString | kInternal) \
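
The renumbered constants above keep every leaf type in a distinct bit so that type unions stay plain bitwise ORs. A standalone sketch with a handful of the new values, copied from the layout above; subtype and overlap questions become bit tests:

#include <cstdint>
#include <cstdio>

// A few leaf bits from the new layout in types.h.
constexpr uint32_t kOtherCallable = 1u << 16;
constexpr uint32_t kOtherObject = 1u << 17;
constexpr uint32_t kOtherUndetectable = 1u << 18;
constexpr uint32_t kCallableProxy = 1u << 19;
constexpr uint32_t kOtherProxy = 1u << 20;
constexpr uint32_t kFunction = 1u << 21;
constexpr uint32_t kBoundFunction = 1u << 22;

// Unions are ORs of disjoint leaf bits, exactly as in the V(...) macros.
constexpr uint32_t kProxy = kCallableProxy | kOtherProxy;
constexpr uint32_t kCallable = kFunction | kBoundFunction | kOtherCallable |
                               kCallableProxy | kOtherUndetectable;

int main() {
  // A callable proxy is both a Proxy and Callable; a plain object is neither.
  std::printf("%d %d %d\n", (kCallableProxy & kProxy) != 0,
              (kCallableProxy & kCallable) != 0,
              (kOtherObject & kCallable) != 0);
  // Prints 1 1 0: type queries reduce to bit tests on disjoint leaves.
}
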
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 30473f2798..38e1f0c84f 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -18,8 +18,8 @@ namespace {
size_t HashCode(Node* node) {
size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
- for (int j = 0; j < node->InputCount(); ++j) {
- h = base::hash_combine(h, node->InputAt(j)->id());
+ for (Node* input : node->inputs()) {
+ h = base::hash_combine(h, input->id());
}
return h;
}
@@ -32,10 +32,17 @@ bool Equals(Node* a, Node* b) {
DCHECK_NOT_NULL(b->op());
if (!a->op()->Equals(b->op())) return false;
if (a->InputCount() != b->InputCount()) return false;
- for (int j = 0; j < a->InputCount(); ++j) {
- DCHECK_NOT_NULL(a->InputAt(j));
- DCHECK_NOT_NULL(b->InputAt(j));
- if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+ Node::Inputs aInputs = a->inputs();
+ Node::Inputs bInputs = b->inputs();
+
+ auto aIt = aInputs.begin();
+ auto bIt = bInputs.begin();
+ auto aEnd = aInputs.end();
+
+ for (; aIt != aEnd; ++aIt, ++bIt) {
+ DCHECK_NOT_NULL(*aIt);
+ DCHECK_NOT_NULL(*bIt);
+ if ((*aIt)->id() != (*bIt)->id()) return false;
}
return true;
}
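
For context on the loop rewrite: value numbering identifies a node by its operator together with the identities (ids) of its inputs, both for hashing and for equality. A toy standalone version of the scheme, with a simplified stand-in for base::hash_combine:

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy node: an operator id plus input node ids, as value numbering sees it.
struct Node {
  int op;
  std::vector<int> input_ids;
};

size_t HashCode(const Node& n) {
  size_t h = n.op * 31 + n.input_ids.size();  // simplified hash_combine
  for (int id : n.input_ids) h = h * 31 + id;
  return h;
}

bool Equals(const Node& a, const Node& b) {
  return a.op == b.op && a.input_ids == b.input_ids;
}

int main() {
  Node add1{7, {1, 2}}, add2{7, {1, 2}}, add3{7, {2, 1}};
  std::printf("%d %d\n", Equals(add1, add2), Equals(add1, add3));
  // Prints 1 0: identical operator and inputs collapse to one value number;
  // reordered inputs do not (commutativity is not value numbering's job).
}
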
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 872305b40a..e11fc98320 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -14,11 +14,12 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
@@ -150,7 +151,7 @@ void Verifier::Visitor::Check(Node* node) {
"control");
}
- // Verify that no-no-throw nodes only have IfSuccess/IfException control
+ // Verify that nodes that can throw only have IfSuccess/IfException control
// uses.
if (!node->op()->HasProperty(Operator::kNoThrow)) {
int count_success = 0, count_exception = 0;
@@ -283,6 +284,11 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kTrapIf:
+ case IrOpcode::kTrapUnless:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kThrow:
@@ -590,16 +596,38 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSLoadProperty:
+ // Type can be anything.
+ CheckTypeIs(node, Type::Any());
+ CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSLoadNamed:
+ // Type can be anything.
+ CheckTypeIs(node, Type::Any());
+ CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSLoadGlobal:
// Type can be anything.
CheckTypeIs(node, Type::Any());
+ CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSStoreProperty:
+ // Type is empty.
+ CheckNotTyped(node);
+ CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSStoreNamed:
+ // Type is empty.
+ CheckNotTyped(node);
+ CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSStoreGlobal:
// Type is empty.
CheckNotTyped(node);
+ CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
+ break;
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ // Type is empty.
+ CheckNotTyped(node);
break;
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
@@ -612,6 +640,13 @@ void Verifier::Visitor::Check(Node* node) {
// Type is String.
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kJSGetSuperConstructor:
+ // We don't check the input for Type::Function because
+ // this_function can be context-allocated.
+ // Any -> Callable.
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Callable());
+ break;
case IrOpcode::kJSLoadContext:
// Type can be anything.
@@ -636,6 +671,7 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckTypeIs(node, Type::Receiver());
@@ -861,6 +897,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kStringCharAt:
+ // (String, Unsigned32) -> String
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringCharCodeAt:
// (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
@@ -893,6 +935,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kNewUnmappedArgumentsElements:
+ CheckTypeIs(node, Type::OtherInternal());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -910,8 +956,6 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kTransitionElementsKind:
CheckValueInputIs(node, 0, Type::Any());
- CheckValueInputIs(node, 1, Type::Internal());
- CheckValueInputIs(node, 2, Type::Internal());
CheckNotTyped(node);
break;
@@ -1041,6 +1085,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Boolean());
CheckNotTyped(node);
break;
+ case IrOpcode::kCheckInternalizedString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::InternalizedString());
+ break;
case IrOpcode::kCheckMaps:
// (Any, Internal, ..., Internal) -> Any
CheckValueInputIs(node, 0, Type::Any());
@@ -1140,6 +1188,7 @@ void Verifier::Visitor::Check(Node* node) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 1b61c1504e..f54ddbf492 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -33,9 +33,12 @@
#include "src/factory.h"
#include "src/log-inl.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -68,9 +71,6 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
Handle<Context> context, Node** parameters,
int parameter_count, Node** effect_ptr,
Node* control) {
- // At the moment we only allow 2 parameters. If more parameters are needed,
- // then the size of {inputs} below has to be increased accordingly.
- DCHECK(parameter_count <= 2);
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -78,7 +78,11 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- Node* inputs[8];
+ // At the moment we only allow 3 parameters. If more parameters are needed,
+ // increase this constant accordingly.
+ static const int kMaxParams = 3;
+ DCHECK_GE(kMaxParams, parameter_count);
+ Node* inputs[kMaxParams + 6];
int count = 0;
inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
for (int i = 0; i < parameter_count; i++) {
@@ -99,6 +103,13 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
} // namespace
+// TODO(eholk): Support trap handlers on other platforms.
+#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
+const bool kTrapHandlerSupported = true;
+#else
+const bool kTrapHandlerSupported = false;
+#endif
+
// A helper that handles building graph fragments for trapping.
// To avoid generating a ton of redundant code that just calls the runtime
// to trap, we generate a per-trap-reason block of code that all trap sites
@@ -159,21 +170,70 @@ class WasmTrapHelper : public ZoneObject {
return TrapIfEq64(reason, node, 0, position);
}
+ Runtime::FunctionId GetFunctionIdForTrap(wasm::TrapReason reason) {
+ if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case wasm::k##name: \
+ return Runtime::kThrowWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+ default:
+ UNREACHABLE();
+ return Runtime::kNumFunctions;
+ }
+ } else {
+ // We use Runtime::kNumFunctions as a marker to tell the code generator
+ // to generate a call to a testing C function instead of a runtime
+ // function. This code should only be called from a cctest.
+ return Runtime::kNumFunctions;
+ }
+ }
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_S390X
+#define WASM_TRAP_IF_SUPPORTED
+#endif
+
// Add a trap if {cond} is true.
void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- AddTrapIf(reason, cond, true, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+ if (FLAG_wasm_trap_if) {
+ int32_t trap_id = GetFunctionIdForTrap(reason);
+ Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
+ builder_->Effect(), builder_->Control());
+ *builder_->control_ = node;
+ builder_->SetSourcePosition(node, position);
+ return;
+ }
+#endif // WASM_TRAP_IF_SUPPORTED
+ BuildTrapIf(reason, cond, true, position);
}
// Add a trap if {cond} is false.
void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- AddTrapIf(reason, cond, false, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+ if (FLAG_wasm_trap_if) {
+ int32_t trap_id = GetFunctionIdForTrap(reason);
+
+ Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
+ builder_->Effect(), builder_->Control());
+ *builder_->control_ = node;
+ builder_->SetSourcePosition(node, position);
+ return;
+ }
+#endif // WASM_TRAP_IF_SUPPORTED
+
+ BuildTrapIf(reason, cond, false, position);
}
// Add a trap if {cond} is true or false according to {iftrue}.
- void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
- wasm::WasmCodePosition position) {
+ void BuildTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+ wasm::WasmCodePosition position) {
Node** effect_ptr = builder_->effect_;
Node** control_ptr = builder_->control_;
Node* before = *effect_ptr;
@@ -196,18 +256,18 @@ class WasmTrapHelper : public ZoneObject {
}
}
- Node* GetTrapValue(wasm::LocalType type) {
+ Node* GetTrapValue(wasm::ValueType type) {
switch (type) {
- case wasm::kAstI32:
+ case wasm::kWasmI32:
return jsgraph()->Int32Constant(0xdeadbeef);
- case wasm::kAstI64:
+ case wasm::kWasmI64:
return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
- case wasm::kAstF32:
+ case wasm::kWasmF32:
return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
- case wasm::kAstF64:
+ case wasm::kWasmF64:
return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
break;
- case wasm::kAstS128:
+ case wasm::kWasmS128:
return builder_->CreateS128Value(0xdeadbeef);
break;
default:
@@ -246,7 +306,6 @@ class WasmTrapHelper : public ZoneObject {
}
void BuildTrapCode(Node* reason_node, Node* position_node) {
- Node* end;
Node** control_ptr = builder_->control_;
Node** effect_ptr = builder_->effect_;
wasm::ModuleEnv* module = builder_->module_;
@@ -277,36 +336,36 @@ class WasmTrapHelper : public ZoneObject {
Node* thrw =
graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
*effect_ptr, *control_ptr);
- end = thrw;
+ MergeControlToEnd(jsgraph(), thrw);
} else {
// End the control flow with returning 0xdeadbeef
Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
- end = graph()->NewNode(jsgraph()->common()->Return(),
- jsgraph()->Int32Constant(0), ret_value,
- *effect_ptr, *control_ptr);
+ builder_->Return(ret_value);
}
-
- MergeControlToEnd(jsgraph(), end);
}
};
WasmGraphBuilder::WasmGraphBuilder(
- Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+ wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
+ wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
- module_(nullptr),
- mem_buffer_(nullptr),
- mem_size_(nullptr),
+ module_(module_env),
+ signature_tables_(zone),
function_tables_(zone),
function_table_sizes_(zone),
- control_(nullptr),
- effect_(nullptr),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
trap_(new (zone) WasmTrapHelper(this)),
- function_signature_(function_signature),
+ sig_(sig),
source_position_table_(source_position_table) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ if (sig->GetParam(i) == wasm::kWasmS128) has_simd_ = true;
+ }
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ if (sig->GetReturn(i) == wasm::kWasmS128) has_simd_ = true;
+ }
DCHECK_NOT_NULL(jsgraph_);
}
@@ -318,7 +377,7 @@ Node* WasmGraphBuilder::Start(unsigned params) {
return start;
}
-Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+Node* WasmGraphBuilder::Param(unsigned index) {
return graph()->NewNode(jsgraph()->common()->Parameter(index),
graph()->start());
}
@@ -376,7 +435,7 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
}
-Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
Node** buf = Realloc(vals, count, count + 1);
@@ -412,6 +471,7 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Node** effect, Node** control) {
+ if (FLAG_wasm_no_stack_checks) return;
if (effect == nullptr) {
effect = effect_;
}
@@ -434,17 +494,14 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
stack_check.Chain(*control);
Node* effect_true = *effect;
- Node* effect_false;
// Generate a call to the runtime if there is a stack check failure.
- {
- Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
- module_->instance->context, nullptr, 0,
- effect, stack_check.if_false);
- effect_false = node;
- }
+ Node* call = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
+ module_->instance->context, nullptr, 0,
+ effect, stack_check.if_false);
+ SetSourcePosition(call, position);
Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
- effect_true, effect_false, stack_check.merge);
+ effect_true, call, stack_check.merge);
*control = stack_check.merge;
*effect = ephi;
@@ -1042,9 +1099,18 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
- Node** buf = Realloc(vals, count, count + 3);
- memmove(buf + 1, buf, sizeof(void*) * count);
+ static const int kStackAllocatedNodeBufferSize = 8;
+ Node* stack_buffer[kStackAllocatedNodeBufferSize];
+ std::vector<Node*> heap_buffer;
+
+ Node** buf = stack_buffer;
+ if (count + 3 > kStackAllocatedNodeBufferSize) {
+ heap_buffer.resize(count + 3);
+ buf = heap_buffer.data();
+ }
+
buf[0] = jsgraph()->Int32Constant(0);
+ memcpy(buf + 1, vals, sizeof(void*) * count);
buf[count + 1] = *effect_;
buf[count + 2] = *control_;
Node* ret =
@@ -1107,7 +1173,7 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
}
Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
- wasm::LocalType wasmtype) {
+ wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1223,7 +1289,7 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
// Perform sign extension using following trick
// result = (x << machine_width - type_width) >> (machine_width -
// type_width)
- if (wasmtype == wasm::kAstI64) {
+ if (wasmtype == wasm::kWasmI64) {
shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
result = graph()->NewNode(
m->Word64Sar(),
@@ -1231,7 +1297,7 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
graph()->NewNode(m->ChangeInt32ToInt64(), result),
shiftBitCount),
shiftBitCount);
- } else if (wasmtype == wasm::kAstI32) {
+ } else if (wasmtype == wasm::kWasmI32) {
shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
result = graph()->NewNode(
m->Word32Sar(),
@@ -1714,9 +1780,8 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
Node* WasmGraphBuilder::GrowMemory(Node* input) {
Diamond check_input_range(
graph(), jsgraph()->common(),
- graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(), input,
- jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
+ graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+ jsgraph()->Uint32Constant(wasm::kV8MaxWasmMemoryPages)),
BranchHint::kTrue);
check_input_range.Chain(*control_);
@@ -1911,36 +1976,101 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
}
Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+ CommonOperatorBuilder* c = jsgraph()->common();
MachineOperatorBuilder* m = jsgraph()->machine();
+ Node* const zero = jsgraph()->Int32Constant(0);
Int32Matcher mr(right);
if (mr.HasValue()) {
- if (mr.Value() == 0) {
- return jsgraph()->Int32Constant(0);
- } else if (mr.Value() == -1) {
- return jsgraph()->Int32Constant(0);
+ if (mr.Value() == 0 || mr.Value() == -1) {
+ return zero;
}
return graph()->NewNode(m->Int32Mod(), left, right, *control_);
}
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ // General case for signed integer modulus, with optimization for (unknown)
+ // power of 2 right hand side.
+ //
+ // if 0 < right then
+ // msk = right - 1
+ // if right & msk != 0 then
+ // left % right
+ // else
+ // if left < 0 then
+ // -(-left & msk)
+ // else
+ // left & msk
+ // else
+ // if right < -1 then
+ // left % right
+ // else
+ // zero
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+ Node* const minus_one = jsgraph()->Int32Constant(-1);
- // Explicit check for x % -1.
- Diamond d(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- d.Chain(z.if_false);
+ const Operator* const merge_op = c->Merge(2);
+ const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
+
+ Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+ Node* branch0 =
+ graph()->NewNode(c->Branch(BranchHint::kTrue), check0, graph()->start());
+
+ Node* if_true0 = graph()->NewNode(c->IfTrue(), branch0);
+ Node* true0;
+ {
+ Node* msk = graph()->NewNode(m->Int32Add(), right, minus_one);
+
+ Node* check1 = graph()->NewNode(m->Word32And(), right, msk);
+ Node* branch1 = graph()->NewNode(c->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+ Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+ Node* false1;
+ {
+ Node* check2 = graph()->NewNode(m->Int32LessThan(), left, zero);
+ Node* branch2 =
+ graph()->NewNode(c->Branch(BranchHint::kFalse), check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(c->IfTrue(), branch2);
+ Node* true2 = graph()->NewNode(
+ m->Int32Sub(), zero,
+ graph()->NewNode(m->Word32And(),
+ graph()->NewNode(m->Int32Sub(), zero, left), msk));
+
+ Node* if_false2 = graph()->NewNode(c->IfFalse(), branch2);
+ Node* false2 = graph()->NewNode(m->Word32And(), left, msk);
+
+ if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(c->IfFalse(), branch0);
+ Node* false0;
+ {
+ Node* check1 = graph()->NewNode(m->Int32LessThan(), right, minus_one);
+ Node* branch1 =
+ graph()->NewNode(c->Branch(BranchHint::kTrue), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+ Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+ Node* false1 = zero;
+
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+ }
- return z.Phi(
- MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+ return graph()->NewNode(phi_op, true0, false0, merge0);
}
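The nested-diamond graph above follows the pseudocode in the comment exactly. As a scalar reference, the same semantics in plain C++ (a sketch, assuming the asm.js rule from the constant case that both x % 0 and x % -1 yield 0; this helper is not part of the patch):

    #include <cstdint>

    int32_t AsmjsRemS(int32_t left, int32_t right) {
      if (right > 0) {
        uint32_t msk = static_cast<uint32_t>(right) - 1;
        if ((static_cast<uint32_t>(right) & msk) != 0) {
          return left % right;  // right is not a power of 2
        }
        if (left < 0) {
          // -(-left & msk); negate in unsigned arithmetic so that
          // left == INT32_MIN stays well-defined.
          return -static_cast<int32_t>(-static_cast<uint32_t>(left) & msk);
        }
        return static_cast<int32_t>(static_cast<uint32_t>(left) & msk);
      }
      if (right < -1) return left % right;
      return 0;  // right == 0 or right == -1
    }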
Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
@@ -2016,6 +2146,8 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
jsgraph()->Int64Constant(-1)));
+ d.Chain(*control_);
+
Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
d.if_false);
@@ -2179,6 +2311,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
Node* table = function_tables_[table_index];
+ Node* signatures = signature_tables_[table_index];
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
@@ -2187,7 +2320,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
const int fixed_offset = access.header_size - access.tag();
{
Node* load_sig = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), table,
+ machine->Load(MachineType::AnyTagged()), signatures,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
@@ -2202,14 +2335,12 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
}
// Load code object from the table.
- uint32_t table_size = module_->module->function_tables[table_index].min_size;
- uint32_t offset = fixed_offset + kPointerSize * table_size;
Node* load_code = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
- Uint32Constant(offset)),
+ Uint32Constant(fixed_offset)),
*effect_, *control_);
args[0] = load_code;
@@ -2342,24 +2473,20 @@ Node* WasmGraphBuilder::BuildChangeFloat64ToTagged(Node* value) {
return value;
}
-Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
switch (type) {
- case wasm::kAstI32:
+ case wasm::kWasmI32:
return BuildChangeInt32ToTagged(node);
- case wasm::kAstS128:
- case wasm::kAstI64:
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr,
- 0, effect_, *control_);
- case wasm::kAstF32:
+ case wasm::kWasmS128:
+ case wasm::kWasmI64:
+ UNREACHABLE();
+ case wasm::kWasmF32:
node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
node);
return BuildChangeFloat64ToTagged(node);
- case wasm::kAstF64:
+ case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
- case wasm::kAstStmt:
+ case wasm::kWasmStmt:
return jsgraph()->UndefinedConstant();
default:
UNREACHABLE();
@@ -2367,8 +2494,7 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
}
}
-Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
- Node* effect, Node* control) {
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2376,7 +2502,9 @@ Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- node, context, effect, control);
+ node, context, *effect_, *control_);
+
+ SetSourcePosition(result, 1);
*effect_ = result;
@@ -2495,35 +2623,30 @@ Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
}
Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
- wasm::LocalType type) {
+ wasm::ValueType type) {
+ DCHECK_NE(wasm::kWasmStmt, type);
+
// Do a JavaScript ToNumber.
- Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+ Node* num = BuildJavaScriptToNumber(node, context);
// Change representation.
SimplifiedOperatorBuilder simplified(jsgraph()->zone());
num = BuildChangeTaggedToFloat64(num);
switch (type) {
- case wasm::kAstI32: {
+ case wasm::kWasmI32: {
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
num);
break;
}
- case wasm::kAstS128:
- case wasm::kAstI64:
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr,
- 0, effect_, *control_);
- case wasm::kAstF32:
+ case wasm::kWasmS128:
+ case wasm::kWasmI64:
+ UNREACHABLE();
+ case wasm::kWasmF32:
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
num);
break;
- case wasm::kAstF64:
- break;
- case wasm::kAstStmt:
- num = jsgraph()->Int32Constant(0);
+ case wasm::kWasmF64:
break;
default:
UNREACHABLE();
@@ -2613,22 +2736,59 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
+bool IsJSCompatible(wasm::ValueType type) {
+ return (type != wasm::kWasmI64) && (type != wasm::kWasmS128);
+}
+
+bool HasJSCompatibleSignature(wasm::FunctionSig* sig) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ if (!IsJSCompatible(sig->GetParam(i))) {
+ return false;
+ }
+ }
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ if (!IsJSCompatible(sig->GetReturn(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
wasm::FunctionSig* sig) {
int wasm_count = static_cast<int>(sig->parameter_count());
- int param_count;
- if (jsgraph()->machine()->Is64()) {
- param_count = static_cast<int>(sig->parameter_count());
- } else {
- param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
- }
- int count = param_count + 3;
+ int count = wasm_count + 3;
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
- Node* start = Start(param_count + 5);
+ Node* start = Start(wasm_count + 5);
*control_ = start;
*effect_ = start;
+
+ if (!HasJSCompatibleSignature(sig_)) {
+ // Throw a TypeError. The native context is good enough here because we
+ // only throw a TypeError.
+ BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ jsgraph()->isolate()->native_context(), nullptr, 0,
+ effect_, *control_);
+
+ // Add a dummy call to the wasm function so that the generated wrapper
+ // contains a reference to the wrapped wasm function. Without this reference,
+ // the wasm function could not be re-imported into another wasm module.
+ int pos = 0;
+ args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ // We only need a dummy call descriptor.
+ wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
+ CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
+ jsgraph()->zone(), dummy_sig_builder.Build());
+ *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ Return(jsgraph()->UndefinedConstant());
+ return;
+ }
+
// Create the context parameter
Node* context = graph()->NewNode(
jsgraph()->common()->Parameter(
@@ -2640,15 +2800,9 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// Convert JS parameters to WASM numbers.
for (int i = 0; i < wasm_count; ++i) {
- Node* param =
- graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
+ Node* param = Param(i + 1);
Node* wasm_param = FromJS(param, context, sig->GetParam(i));
args[pos++] = wasm_param;
- if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
- // We make up the high word with SAR to get the proper sign extension.
- args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
- wasm_param, jsgraph()->Int32Constant(31));
- }
}
args[pos++] = *effect_;
@@ -2657,23 +2811,13 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// Call the WASM code.
CallDescriptor* desc =
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
- if (jsgraph()->machine()->Is32()) {
- desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
- }
+
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ *effect_ = call;
Node* retval = call;
- if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
- sig->GetReturn(0) == wasm::kAstI64) {
- // The return values comes as two values, we pick the low word.
- retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
- graph()->start());
- }
Node* jsval = ToJS(
- retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
- jsgraph()->Int32Constant(0), jsval, call, start);
-
- MergeControlToEnd(jsgraph(), ret);
+ retval, sig->return_count() == 0 ? wasm::kWasmStmt : sig->GetReturn());
+ Return(jsval);
}
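In outline, the JS-to-wasm wrapper built above now takes one of two shapes (a condensed sketch of the code in this hunk):

    // sig contains i64 or s128 (not JS-compatible):
    //   BuildCallToRuntime(Runtime::kWasmThrowTypeError, ...)
    //   dummy Call(wasm_code)   // keeps a reference so the function can still
    //                           // be re-imported into another wasm module
    //   Return(undefined)
    // otherwise:
    //   args[pos++] = FromJS(Param(i + 1), context, sig->GetParam(i)) per param
    //   call = Call(wasm_code, ...); Return(ToJS(call, sig->GetReturn()))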
int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
@@ -2681,14 +2825,8 @@ int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
// Convert WASM numbers to JS values.
int param_index = 0;
for (int i = 0; i < param_count; ++i) {
- Node* param = graph()->NewNode(
- jsgraph()->common()->Parameter(param_index++), graph()->start());
+ Node* param = Param(param_index++);
args[pos++] = ToJS(param, sig->GetParam(i));
- if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
- // On 32 bit platforms we have to skip the high word of int64
- // parameters.
- param_index++;
- }
}
return pos;
}
@@ -2698,19 +2836,23 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
DCHECK(target->IsCallable());
int wasm_count = static_cast<int>(sig->parameter_count());
- int param_count;
- if (jsgraph()->machine()->Is64()) {
- param_count = wasm_count;
- } else {
- param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
- }
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
CallDescriptor* desc;
- Node* start = Start(param_count + 3);
+ Node* start = Start(wasm_count + 3);
*effect_ = start;
*control_ = start;
+
+ if (!HasJSCompatibleSignature(sig_)) {
+ // Throw a TypeError. The native context is good enough here because we
+ // only throw a TypeError.
+ Return(BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ jsgraph()->isolate()->native_context(), nullptr,
+ 0, effect_, *control_));
+ return;
+ }
+
Node** args = Buffer(wasm_count + 7);
Node* call;
@@ -2777,24 +2919,113 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
}
+ *effect_ = call;
+ SetSourcePosition(call, 0);
+
// Convert the return value back.
- Node* ret;
- Node* val =
- FromJS(call, HeapConstant(isolate->native_context()),
- sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* pop_size = jsgraph()->Int32Constant(0);
+ Node* i32_zero = jsgraph()->Int32Constant(0);
+ Node* val = sig->return_count() == 0
+ ? i32_zero
+ : FromJS(call, HeapConstant(isolate->native_context()),
+ sig->GetReturn());
+ Return(val);
+}
+
+void WasmGraphBuilder::BuildWasmInterpreterEntry(
+ uint32_t function_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance) {
+ int wasm_count = static_cast<int>(sig->parameter_count());
+ int param_count = jsgraph()->machine()->Is64()
+ ? wasm_count
+ : Int64Lowering::GetParameterCountAfterLowering(sig);
+
+ // Build the start and the parameter nodes.
+ Node* start = Start(param_count + 3);
+ *effect_ = start;
+ *control_ = start;
+
+ // Compute size for the argument buffer.
+ int args_size_bytes = 0;
+ for (int i = 0; i < wasm_count; i++) {
+ args_size_bytes += 1 << ElementSizeLog2Of(sig->GetParam(i));
+ }
+
+ // The return value is also passed via this buffer:
+ DCHECK_GE(wasm::kV8MaxWasmFunctionReturns, sig->return_count());
+ // TODO(wasm): Handle multi-value returns.
+ DCHECK_EQ(1, wasm::kV8MaxWasmFunctionReturns);
+ int return_size_bytes =
+ sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
+
+ // Get a stack slot for the arguments.
+ Node* arg_buffer = graph()->NewNode(jsgraph()->machine()->StackSlot(
+ std::max(args_size_bytes, return_size_bytes)));
+
+ // Now store all our arguments to the buffer.
+ int param_index = 0;
+ int offset = 0;
+ for (int i = 0; i < wasm_count; i++) {
+ Node* param = Param(param_index++);
+ bool is_i64_as_two_params =
+ jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
+ MachineRepresentation param_rep =
+ is_i64_as_two_params ? wasm::kWasmI32 : sig->GetParam(i);
+ StoreRepresentation store_rep(param_rep, WriteBarrierKind::kNoWriteBarrier);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset), param, *effect_, *control_);
+ offset += 1 << ElementSizeLog2Of(param_rep);
+ // TODO(clemensh): Respect endianness here. Might need to swap upper and
+ // lower word.
+ if (is_i64_as_two_params) {
+ // Also store the upper half.
+ param = Param(param_index++);
+ StoreRepresentation store_rep(wasm::kWasmI32,
+ WriteBarrierKind::kNoWriteBarrier);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset), param, *effect_, *control_);
+ offset += 1 << ElementSizeLog2Of(wasm::kWasmI32);
+ }
+ }
+ DCHECK_EQ(param_count, param_index);
+ DCHECK_EQ(args_size_bytes, offset);
+
+ // We are passing the raw arg_buffer here. To the GC and other parts, it looks
+ // like a Smi (lowest bit not set). In the runtime function, however, don't
+ // call Smi::value on it; just cast it to a byte pointer.
+ Node* parameters[] = {
+ jsgraph()->HeapConstant(instance), // wasm instance
+ jsgraph()->SmiConstant(function_index), // function index
+ arg_buffer, // argument buffer
+ };
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(),
+ jsgraph()->isolate()->native_context(), parameters,
+ arraysize(parameters), effect_, *control_);
+
+ // Read back the return value.
if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
- sig->GetReturn() == wasm::kAstI64) {
- ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
- graph()->NewNode(jsgraph()->machine()->Word32Sar(),
- val, jsgraph()->Int32Constant(31)),
- call, start);
+ sig->GetReturn() == wasm::kWasmI64) {
+ MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(wasm::kWasmI32);
+ Node* lower =
+ graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(0), *effect_, *control_);
+ Node* upper =
+ graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(sizeof(int32_t)), *effect_, *control_);
+ Return(upper, lower);
} else {
- ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
- start);
+ Node* val;
+ if (sig->return_count() == 0) {
+ val = Int32Constant(0);
+ } else {
+ MachineType load_rep =
+ wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
+ val = graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(0), *effect_, *control_);
+ }
+ Return(val);
}
-
- MergeControlToEnd(jsgraph(), ret);
}
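To make the store loop above concrete, take a signature (i32, i64) -> f64 on a 32-bit target (an illustrative case; sizes follow ElementSizeLog2Of):

    // offset 0: param 0, i32        (4 bytes)
    // offset 4: param 1, i64 low    (4 bytes)
    // offset 8: param 1, i64 high   (4 bytes; word order is still subject to
    //                                the endianness TODO above)
    // args_size_bytes = 12, return_size_bytes = 8 (one f64), so the stack
    // slot is max(12, 8) = 12 bytes; the interpreter writes the return value
    // back at offset 0, where the load-back code above reads it.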
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
@@ -2853,12 +3084,18 @@ Node* WasmGraphBuilder::MemSize(uint32_t offset) {
void WasmGraphBuilder::EnsureFunctionTableNodes() {
if (function_tables_.size() > 0) return;
- for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
- auto handle = module_->instance->function_tables[i];
- DCHECK(!handle.is_null());
- function_tables_.push_back(HeapConstant(handle));
+ size_t tables_size = module_->instance->function_tables.size();
+ DCHECK_EQ(tables_size, module_->instance->signature_tables.size());
+ for (size_t i = 0; i < tables_size; ++i) {
+ auto function_handle = module_->instance->function_tables[i];
+ auto signature_handle = module_->instance->signature_tables[i];
+ DCHECK(!function_handle.is_null() && !signature_handle.is_null());
+ function_tables_.push_back(HeapConstant(function_handle));
+ signature_tables_.push_back(HeapConstant(signature_handle));
uint32_t table_size = module_->module->function_tables[i].min_size;
- function_table_sizes_.push_back(Uint32Constant(table_size));
+ function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
}
}
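The only functional change to the table sizes is the constant's reloc mode; side by side (interpreting the reloc mode by its name, which this diff does not define):

    // before: Uint32Constant(table_size)
    //         -- a plain immediate, frozen at compile time
    // after:  RelocatableInt32Constant(table_size,
    //             RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE)
    //         -- the same value, but the embedded immediate can be located
    //            (and presumably patched) in generated code later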
@@ -2895,6 +3132,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t offset,
wasm::WasmCodePosition position) {
DCHECK(module_ && module_->instance);
+ if (FLAG_wasm_no_bounds_checks) return;
uint32_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
@@ -2945,15 +3183,14 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
-
-Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler) {
+ if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
BoundsCheckMem(memtype, index, offset, position);
}
bool aligned = static_cast<int>(alignment) >=
@@ -2961,18 +3198,19 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
- if (FLAG_wasm_trap_handler) {
- Node* context = HeapConstant(module_->instance->context);
+ if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ DCHECK(FLAG_wasm_guard_pages);
Node* position_node = jsgraph()->Int32Constant(position);
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, context, position_node,
- *effect_, *control_);
+ MemBuffer(offset), index, position_node, *effect_,
+ *control_);
} else {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
} else {
- DCHECK(!FLAG_wasm_trap_handler);
+ // TODO(eholk): Support unaligned loads with trap handlers.
+ DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -2983,7 +3221,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
load = BuildChangeEndianness(load, memtype, type);
#endif
- if (type == wasm::kAstI64 &&
+ if (type == wasm::kWasmI64 &&
ElementSizeLog2Of(memtype.representation()) < 3) {
// TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
if (memtype.IsSigned()) {
@@ -3006,7 +3244,9 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
Node* store;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset, position);
+ if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+ BoundsCheckMem(memtype, index, offset, position);
+ }
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
bool aligned = static_cast<int>(alignment) >=
@@ -3018,11 +3258,20 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
if (aligned ||
jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
- store =
- graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
+ if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ Node* position_node = jsgraph()->Int32Constant(position);
+ store = graph()->NewNode(
+ jsgraph()->machine()->ProtectedStore(memtype.representation()),
+ MemBuffer(offset), index, val, position_node, *effect_, *control_);
+ } else {
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ }
} else {
+ // TODO(eholk): Support unaligned stores with trap handlers.
+ DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
@@ -3070,16 +3319,14 @@ Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::Int64LoweringForTesting() {
if (jsgraph()->machine()->Is32()) {
Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
- jsgraph()->common(), jsgraph()->zone(),
- function_signature_);
+ jsgraph()->common(), jsgraph()->zone(), sig_);
r.LowerGraph();
}
}
void WasmGraphBuilder::SimdScalarLoweringForTesting() {
SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
- jsgraph()->common(), jsgraph()->zone(),
- function_signature_)
+ jsgraph()->common(), jsgraph()->zone(), sig_)
.LowerGraph();
}
@@ -3093,6 +3340,7 @@ void WasmGraphBuilder::SetSourcePosition(Node* node,
Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
// TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
// instead of creating a SIMD Value.
+ has_simd_ = true;
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
Int32Constant(value), Int32Constant(value),
Int32Constant(value), Int32Constant(value));
@@ -3100,36 +3348,78 @@ Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
const NodeVector& inputs) {
+ has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF32x4Splat:
+ return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
+ inputs[0], inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprF32x4FromInt32x4:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
+ inputs[0]);
+ case wasm::kExprF32x4FromUint32x4:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
+ inputs[0]);
+ case wasm::kExprF32x4Abs:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Abs(), inputs[0]);
+ case wasm::kExprF32x4Neg:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Neg(), inputs[0]);
+ case wasm::kExprF32x4Add:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Sub:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Eq:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Equal(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Ne:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4NotEqual(),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprI32x4FromFloat32x4:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
+ inputs[0]);
+ case wasm::kExprUi32x4FromFloat32x4:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
+ inputs[0]);
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
- inputs[0], inputs[1]);
- case wasm::kExprF32x4Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
- inputs[0], inputs[0], inputs[0], inputs[0]);
- case wasm::kExprF32x4Add:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ case wasm::kExprI32x4Sub:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4Eq:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4Ne:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprS32x4Select:
+ return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
+ inputs[1], inputs[2]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
}
-Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
- Node* input) {
+Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+ const NodeVector& inputs) {
+ has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
- Int32Constant(lane));
+ return graph()->NewNode(jsgraph()->common()->Int32x4ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI32x4ReplaceLane:
+ return graph()->NewNode(jsgraph()->common()->Int32x4ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
- input, Int32Constant(lane));
+ return graph()->NewNode(jsgraph()->common()->Float32x4ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF32x4ReplaceLane:
+ return graph()->NewNode(jsgraph()->common()->Float32x4ReplaceLane(lane),
+ inputs[0], inputs[1]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
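The lane-access refactoring above moves the lane index from a value input on a machine operator to a static parameter on a common operator; both shapes appear in this hunk:

    // before: lane passed as an extra Int32Constant input
    //   graph()->NewNode(machine->Int32x4ExtractLane(), input, Int32Constant(lane));
    // after: lane baked into the operator, with ReplaceLane added symmetrically
    //   graph()->NewNode(common->Int32x4ExtractLane(lane), inputs[0]);
    //   graph()->NewNode(common->Int32x4ReplaceLane(lane), inputs[0], inputs[1]);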
@@ -3156,9 +3446,10 @@ static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
*script_str, 0, 0));
}
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
Handle<Code> wasm_code, uint32_t index) {
- const wasm::WasmFunction* func = &module->module->functions[index];
+ const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
// Create the Graph
@@ -3172,10 +3463,10 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ wasm::ModuleEnv module_env(module, nullptr);
+ WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.set_module(module);
builder.BuildJSToWasmWrapper(wasm_code, func->sig);
//----------------------------------------------------------------------------
@@ -3188,8 +3479,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
}
// Schedule and compile to machine code.
- int params =
- static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+ int params = static_cast<int>(
+ module_env.GetFunctionSignature(index)->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
@@ -3222,10 +3513,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
- wasm::WasmName("export"),
- module->module->GetName(func->name_offset, func->name_length));
+ char func_name[32];
+ SNPrintF(ArrayVector(func_name), "js-to-wasm#%d", func->func_index);
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "js-to-wasm", index, wasm::WasmName("export"),
+ CStrVector(func_name));
}
return code;
}
@@ -3233,7 +3525,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
- MaybeHandle<String> import_name) {
+ MaybeHandle<String> import_name,
+ wasm::ModuleOrigin origin) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -3246,7 +3539,12 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(&zone, &jsgraph, sig);
+ SourcePositionTable* source_position_table =
+ origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
+ : nullptr;
+
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+ source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToJSWrapper(target, sig);
@@ -3282,7 +3580,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
CompilationInfo info(func_name, isolate, &zone, flags);
- code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr,
+ source_position_table);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -3310,6 +3609,72 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
return code;
}
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+ wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance) {
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.BuildWasmInterpreterEntry(func_index, sig, instance);
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Wasm to interpreter graph -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming =
+ wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ }
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
+ EmbeddedVector<char, 32> debug_name;
+ int name_len = SNPrintF(debug_name, "wasm-to-interpreter#%d", func_index);
+ DCHECK(name_len > 0 && name_len < debug_name.length());
+ debug_name.Truncate(name_len);
+ DCHECK_EQ('\0', debug_name.start()[debug_name.length()]);
+
+ CompilationInfo info(debug_name, isolate, &zone, flags);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(debug_name.start(), os);
+ }
+#endif
+
+ if (isolate->logger()->is_logging_code_events() ||
+ isolate->is_profiling()) {
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "wasm-to-interpreter", func_index,
+ wasm::WasmName("module"), debug_name);
+ }
+ }
+
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(1, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ deopt_data->set(0, *weak_instance);
+ code->set_deoptimization_data(*deopt_data);
+
+ return code;
+}
+
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
base::ElapsedTimer decode_timer;
@@ -3323,12 +3688,12 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
MachineOperatorBuilder* machine = jsgraph_->machine();
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
- WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
- source_position_table);
- wasm::FunctionBody body = {
- module_env_, function_->sig, module_env_->module->module_start,
- module_env_->module->module_start + function_->code_start_offset,
- module_env_->module->module_start + function_->code_end_offset};
+ WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
+ function_->sig, source_position_table);
+ const byte* module_start = module_env_->module_bytes.start();
+ wasm::FunctionBody body = {function_->sig, module_start,
+ module_start + function_->code_start_offset,
+ module_start + function_->code_end_offset};
graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
@@ -3341,18 +3706,25 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
}
if (machine->Is32()) {
- Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
- r.LowerGraph();
+ Int64Lowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ .LowerGraph();
}
- SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
- .LowerGraph();
+ if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
+ SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ .LowerGraph();
+ }
int index = static_cast<int>(function_->func_index);
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
OFStream os(stdout);
- PrintAst(isolate_->allocator(), body, os, nullptr);
+ PrintRawWasmCode(isolate_->allocator(), body, module_env_->module);
+ }
+ if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
+ OFStream os(stdout);
+ PrintWasmText(module_env_->module, *module_env_, function_->func_index, os,
+ nullptr);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -3362,13 +3734,13 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function,
uint32_t index)
: thrower_(thrower),
isolate_(isolate),
module_env_(module_env),
- function_(function),
+ function_(&module_env->module->functions[index]),
graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
jsgraph_(new (graph_zone()) JSGraph(
isolate, new (graph_zone()) Graph(graph_zone()),
@@ -3378,15 +3750,14 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()))),
compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(function->name_length != 0
- ? module_env->module->GetNameOrNull(function->name_offset,
- function->name_length)
- : ArrayVector("wasm"),
+ info_(function->name_length != 0 ? module_env->GetNameOrNull(function)
+ : ArrayVector("wasm"),
isolate, &compilation_zone_,
Code::ComputeFlags(Code::WASM_FUNCTION)),
job_(),
index_(index),
- ok_(true) {
+ ok_(true),
+ protected_instructions_(&compilation_zone_) {
// Create and cache this node in the main thread.
jsgraph_->CEntryStubConstant(1);
}
@@ -3426,8 +3797,9 @@ void WasmCompilationUnit::ExecuteCompilation() {
descriptor =
module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
}
- job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
- descriptor, source_positions));
+ job_.reset(Pipeline::NewWasmCompilationJob(
+ &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
+ module_env_->module->origin != wasm::kWasmOrigin));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(ahaas): The counters are not thread-safe at the moment.
@@ -3451,8 +3823,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (graph_construction_result_.failed()) {
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
- wasm::WasmName name = module_env_->module->GetName(
- function_->name_offset, function_->name_length);
+ wasm::WasmName name = module_env_->GetName(function_);
SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
function_->func_index, name.length(), name.start());
thrower_->CompileFailed(buffer.start(), graph_construction_result_);
@@ -3472,11 +3843,10 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
- function_->func_index, wasm::WasmName("module"),
- module_env_->module->GetName(function_->name_offset,
- function_->name_length));
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
+ "WASM_function", function_->func_index,
+ wasm::WasmName("module"),
+ module_env_->GetName(function_));
}
if (FLAG_trace_wasm_decode_time) {
@@ -3487,9 +3857,27 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
compile_ms);
}
+ Handle<FixedArray> protected_instructions = PackProtectedInstructions();
+ code->set_protected_instructions(*protected_instructions);
+
return code;
}
+Handle<FixedArray> WasmCompilationUnit::PackProtectedInstructions() const {
+ const int num_instructions = static_cast<int>(protected_instructions_.size());
+ Handle<FixedArray> fn_protected = isolate_->factory()->NewFixedArray(
+ num_instructions * Code::kTrapDataSize, TENURED);
+ for (unsigned i = 0; i < protected_instructions_.size(); ++i) {
+ const trap_handler::ProtectedInstructionData& instruction =
+ protected_instructions_[i];
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapCodeOffset,
+ Smi::FromInt(instruction.instr_offset));
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapLandingOffset,
+ Smi::FromInt(instruction.landing_offset));
+ }
+ return fn_protected;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
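PackProtectedInstructions above emits Code::kTrapDataSize slots per protected instruction; for entry i, the slots are (as used in the loop):

    // fn_protected[kTrapDataSize * i + kTrapCodeOffset]    = Smi(instr_offset)
    //     offset of the memory instruction that may fault
    // fn_protected[kTrapDataSize * i + kTrapLandingOffset] = Smi(landing_offset)
    //     offset of the out-of-line trap code to land on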
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index b4bc350297..a1bad1f0e5 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -11,6 +11,8 @@
// Do not include anything from src/compiler here!
#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone.h"
@@ -29,8 +31,10 @@ class SourcePositionTable;
namespace wasm {
// Forward declarations for some WASM data structures.
+struct ModuleBytesEnv;
struct ModuleEnv;
struct WasmFunction;
+struct WasmModule;
class ErrorThrower;
struct DecodeStruct;
@@ -43,7 +47,7 @@ namespace compiler {
class WasmCompilationUnit final {
public:
WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function, uint32_t index);
Zone* graph_zone() { return graph_zone_.get(); }
@@ -54,19 +58,21 @@ class WasmCompilationUnit final {
static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+ WasmCompilationUnit unit(thrower, isolate, module_env, function,
+ function->func_index);
unit.ExecuteCompilation();
return unit.FinishCompilation();
}
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+ Handle<FixedArray> PackProtectedInstructions() const;
wasm::ErrorThrower* thrower_;
Isolate* isolate_;
- wasm::ModuleEnv* module_env_;
+ wasm::ModuleBytesEnv* module_env_;
const wasm::WasmFunction* function_;
// The graph zone is deallocated at the end of ExecuteCompilation.
std::unique_ptr<Zone> graph_zone_;
@@ -77,6 +83,9 @@ class WasmCompilationUnit final {
uint32_t index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_;
+ ZoneVector<trap_handler::ProtectedInstructionData>
+ protected_instructions_; // Instructions that are protected by the signal
+ // handler.
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
@@ -85,12 +94,20 @@ class WasmCompilationUnit final {
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
- MaybeHandle<String> import_name);
+ MaybeHandle<String> import_name,
+ wasm::ModuleOrigin origin);
// Wraps a given wasm code object, producing a code object.
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
Handle<Code> wasm_code, uint32_t index);
+// Compiles a stub that redirects a call to a wasm function to the wasm
+// interpreter. It's ABI compatible with the compiled wasm function.
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+ wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance);
+
// Abstracts details of building TurboFan graph nodes for WASM to separate
// the WASM decoder from the internal details of TurboFan.
class WasmTrapHelper;
@@ -98,7 +115,7 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
WasmGraphBuilder(
- Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+ wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
@@ -116,11 +133,11 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
Node* Error();
Node* Start(unsigned params);
- Node* Param(unsigned index, wasm::LocalType type);
+ Node* Param(unsigned index);
Node* Loop(Node* entry);
Node* Terminate(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
- Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+ Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
Node* NumberConstant(int32_t value);
Node* Uint32Constant(uint32_t value);
@@ -155,7 +172,12 @@ class WasmGraphBuilder {
Node* Switch(unsigned count, Node* key);
Node* IfValue(int32_t value, Node* sw);
Node* IfDefault(Node* sw);
- Node* Return(unsigned count, Node** vals);
+ Node* Return(unsigned count, Node** nodes);
+ template <typename... Nodes>
+ Node* Return(Node* fst, Nodes*... more) {
+ Node* arr[] = {fst, more...};
+ return Return(arraysize(arr), arr);
+ }
Node* ReturnVoid();
Node* Unreachable(wasm::WasmCodePosition position);
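The variadic Return overload above packs its arguments into a local array and forwards to Return(unsigned, Node**); both call shapes appear in the wasm-compiler.cc hunks earlier in this patch:

    Return(jsval);         // single value (JS-to-wasm wrapper)
    Return(upper, lower);  // an i64 as two i32 halves (interpreter entry, 32-bit)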
@@ -166,9 +188,11 @@ class WasmGraphBuilder {
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
+ void BuildWasmInterpreterEntry(uint32_t func_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance);
- Node* ToJS(Node* node, wasm::LocalType type);
- Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+ Node* ToJS(Node* node, wasm::ValueType type);
+ Node* FromJS(Node* node, Node* context, wasm::ValueType type);
Node* Invert(Node* node);
void EnsureFunctionTableNodes();
@@ -178,7 +202,7 @@ class WasmGraphBuilder {
Node* CurrentMemoryPages();
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
- Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+ Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
Node* StoreMem(MachineType type, Node* index, uint32_t offset,
@@ -190,13 +214,11 @@ class WasmGraphBuilder {
Node* Control() { return *control_; }
Node* Effect() { return *effect_; }
- void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
-
void set_control_ptr(Node** control) { this->control_ = control; }
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
- wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+ wasm::FunctionSig* GetFunctionSignature() { return sig_; }
void Int64LoweringForTesting();
@@ -207,7 +229,13 @@ class WasmGraphBuilder {
Node* CreateS128Value(int32_t value);
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
- Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
+
+ Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+ const NodeVector& inputs);
+
+ bool has_simd() const { return has_simd_; }
+
+ wasm::ModuleEnv* module_env() const { return module_; }
private:
static const int kDefaultBufferSize = 16;
@@ -215,19 +243,21 @@ class WasmGraphBuilder {
Zone* zone_;
JSGraph* jsgraph_;
- wasm::ModuleEnv* module_;
- Node* mem_buffer_;
- Node* mem_size_;
+ wasm::ModuleEnv* module_ = nullptr;
+ Node* mem_buffer_ = nullptr;
+ Node* mem_size_ = nullptr;
+ NodeVector signature_tables_;
NodeVector function_tables_;
NodeVector function_table_sizes_;
- Node** control_;
- Node** effect_;
+ Node** control_ = nullptr;
+ Node** effect_ = nullptr;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
+ bool has_simd_ = false;
WasmTrapHelper* trap_;
- wasm::FunctionSig* function_signature_;
+ wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
compiler::SourcePositionTable* source_position_table_ = nullptr;
@@ -243,7 +273,7 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* BuildChangeEndianness(Node* node, MachineType type,
- wasm::LocalType wasmtype = wasm::kAstStmt);
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -314,8 +344,7 @@ class WasmGraphBuilder {
MachineType result_type, int trap_zero,
wasm::WasmCodePosition position);
- Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
- Node* control);
+ Node* BuildJavaScriptToNumber(Node* node, Node* context);
Node* BuildChangeInt32ToTagged(Node* value);
Node* BuildChangeFloat64ToTagged(Node* value);
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index a41c93ca35..c4acfb3672 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -24,17 +24,17 @@ using compiler::LinkageLocation;
namespace {
-MachineType MachineTypeFor(LocalType type) {
+MachineType MachineTypeFor(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return MachineType::Int32();
- case kAstI64:
+ case kWasmI64:
return MachineType::Int64();
- case kAstF64:
+ case kWasmF64:
return MachineType::Float64();
- case kAstF32:
+ case kWasmF32:
return MachineType::Float32();
- case kAstS128:
+ case kWasmS128:
return MachineType::Simd128();
default:
UNREACHABLE();
@@ -173,7 +173,7 @@ struct Allocator {
int stack_offset;
- LinkageLocation Next(LocalType type) {
+ LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
@@ -182,7 +182,7 @@ struct Allocator {
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type == kAstF32) {
+ if (type == kWasmF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code),
@@ -206,11 +206,11 @@ struct Allocator {
}
}
}
- bool IsFloatingPoint(LocalType type) {
- return type == kAstF32 || type == kAstF64;
+ bool IsFloatingPoint(ValueType type) {
+ return type == kWasmF32 || type == kWasmF64;
}
- int Words(LocalType type) {
- if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
+ int Words(ValueType type) {
+ if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
return 2;
}
return 1;
@@ -285,7 +285,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// Add return location(s).
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
- LocalType ret = fsig->GetReturn(i);
+ ValueType ret = fsig->GetReturn(i);
locations.AddReturn(rets.Next(ret));
}
@@ -294,7 +294,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
- LocalType param = fsig->GetParam(i);
+ ValueType param = fsig->GetParam(i);
locations.AddParam(params.Next(param));
}
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 745ac50841..cd4eeedf10 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -43,9 +43,7 @@ class X64OperandConverter : public InstructionOperandConverter {
DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
return Immediate(0);
}
- if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(constant.ToInt32(), constant.rmode());
}
return Immediate(constant.ToInt32());
@@ -270,38 +268,58 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
class WasmOutOfLineTrap final : public OutOfLineCode {
public:
- WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
- Register context, int32_t position)
+ WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
+ int32_t position, Instruction* instr)
: OutOfLineCode(gen),
+ gen_(gen),
pc_(pc),
frame_elided_(frame_elided),
- context_(context),
- position_(position) {}
+ position_(position),
+ instr_(instr) {}
+ // TODO(eholk): Refactor this method to take the code generator as a
+ // parameter.
void Generate() final {
- // TODO(eholk): record pc_ and the current pc in a table so that
- // the signal handler can find it.
- USE(pc_);
+ int current_pc = __ pc_offset();
+
+ gen_->AddProtectedInstruction(pc_, current_pc);
if (frame_elided_) {
- __ EnterFrame(StackFrame::WASM);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
}
wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
__ Push(Smi::FromInt(trap_reason));
__ Push(Smi::FromInt(position_));
- __ Move(rsi, context_);
+ __ Move(rsi, gen_->isolate()->native_context());
__ CallRuntime(Runtime::kThrowWasmError);
+
+ if (instr_->reference_map() != nullptr) {
+ gen_->RecordSafepoint(instr_->reference_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
}
private:
- Address pc_;
+ CodeGenerator* gen_;
+ int pc_;
bool frame_elided_;
- Register context_;
int32_t position_;
+ Instruction* instr_;
};
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, size_t input_count,
+ X64OperandConverter& i, int pc, Instruction* instr) {
+ const X64MemoryProtection protection =
+ static_cast<X64MemoryProtection>(MiscField::decode(opcode));
+ if (protection == X64MemoryProtection::kProtected) {
+ const bool frame_elided = !codegen->frame_access_state()->has_frame();
+ const int32_t position = i.InputInt32(input_count - 1);
+ new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position, instr);
+ }
+}
} // namespace
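Tying the pieces above together: each protected memory instruction passes __ pc_offset() taken just before it is emitted, and Generate() pairs that with the out-of-line code's own offset (a sketch of the flow in this patch):

    // at the instruction:  EmitOOLTrapIfNeeded(..., __ pc_offset(), instr);
    // in Generate():       gen_->AddProtectedInstruction(pc_, __ pc_offset());
    // at code finish:      WasmCompilationUnit::PackProtectedInstructions()
    //                      persists the (pc, landing) pairs with the Code object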
@@ -1838,21 +1856,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kX64Movsxbl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxbq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxbq);
break;
case kX64Movzxbq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1863,21 +1891,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movsxwl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxwq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1888,7 +1926,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
- case kX64TrapMovl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
@@ -1897,14 +1936,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(i.OutputRegister(), i.InputOperand(0));
}
} else {
- Address pc = __ pc();
__ movl(i.OutputRegister(), i.MemoryOperand());
-
- if (arch_opcode == kX64TrapMovl) {
- bool frame_elided = !frame_access_state()->has_frame();
- new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
- i.InputRegister(2), i.InputInt32(3));
- }
}
__ AssertZeroExtended(i.OutputRegister());
} else {
@@ -1918,9 +1950,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsxlq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxlq);
break;
case kX64Movq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -1934,6 +1970,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movss:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -1943,6 +1981,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsd:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2124,6 +2164,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
+ case kX64Int32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64Int32x4Add: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64Int32x4Sub: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -2183,61 +2243,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
+namespace {
-// Assembles branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- X64OperandConverter i(this, instr);
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
- switch (branch->condition) {
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
case kUnorderedEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kEqual:
- __ j(equal, tlabel);
- break;
+ return equal;
case kUnorderedNotEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kNotEqual:
- __ j(not_equal, tlabel);
- break;
+ return not_equal;
case kSignedLessThan:
- __ j(less, tlabel);
- break;
+ return less;
case kSignedGreaterThanOrEqual:
- __ j(greater_equal, tlabel);
- break;
+ return greater_equal;
case kSignedLessThanOrEqual:
- __ j(less_equal, tlabel);
- break;
+ return less_equal;
case kSignedGreaterThan:
- __ j(greater, tlabel);
- break;
+ return greater;
case kUnsignedLessThan:
- __ j(below, tlabel);
- break;
+ return below;
case kUnsignedGreaterThanOrEqual:
- __ j(above_equal, tlabel);
- break;
+ return above_equal;
case kUnsignedLessThanOrEqual:
- __ j(below_equal, tlabel);
- break;
+ return below_equal;
case kUnsignedGreaterThan:
- __ j(above, tlabel);
- break;
+ return above;
case kOverflow:
- __ j(overflow, tlabel);
- break;
+ return overflow;
case kNotOverflow:
- __ j(no_overflow, tlabel);
- break;
+ return no_overflow;
default:
- UNREACHABLE();
break;
}
+ UNREACHABLE();
+ return no_condition;
+}
+
+} // namespace
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
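
Why the parity jumps survive the refactor: on x64, ucomiss/ucomisd report an unordered (NaN) comparison by setting PF (together with ZF and CF), so the mapped equal/not_equal jump alone would misclassify NaN operands. A sketch of the two float shapes this produces:

  kUnorderedEqual:     j parity_even, flabel   ; NaN => false branch
                       j equal,       tlabel
  kUnorderedNotEqual:  j parity_even, tlabel   ; NaN => true branch
                       j not_equal,   tlabel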
@@ -2246,6 +2303,71 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ X64OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(rsi, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_even, &end);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(condition), tlabel);
+ __ bind(&end);
+}
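+
+// Illustrative shape of the code AssembleArchTrap emits for a
+// kUnorderedNotEqual trap (a sketch, assuming the preceding compare
+// already set the flags); the hot path is just the conditional jumps:
+//   j parity_even, ool_entry   ; NaN also traps
+//   j not_equal,   ool_entry   ; condition holds => trap
+// end:                         ; fall through, no trap taken
+// ool_entry:                   ; cold code at the end of the function
+//   EnterFrame WASM_COMPILED   ; only if the caller's frame was elided
+//   CallRuntime(trap_id)       ; records a safepoint, does not return
+//   ud2                        ; emitted under --debug-code only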
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2258,60 +2380,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = no_condition;
- switch (condition) {
- case kUnorderedEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kEqual:
- cc = equal;
- break;
- case kUnorderedNotEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kNotEqual:
- cc = not_equal;
- break;
- case kSignedLessThan:
- cc = less;
- break;
- case kSignedGreaterThanOrEqual:
- cc = greater_equal;
- break;
- case kSignedLessThanOrEqual:
- cc = less_equal;
- break;
- case kSignedGreaterThan:
- cc = greater;
- break;
- case kUnsignedLessThan:
- cc = below;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- cc = below_equal;
- break;
- case kUnsignedGreaterThan:
- cc = above;
- break;
- case kOverflow:
- cc = overflow;
- break;
- case kNotOverflow:
- cc = no_overflow;
- break;
- default:
- UNREACHABLE();
- break;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
}
__ bind(&check);
- __ setcc(cc, reg);
+ __ setcc(FlagsConditionToCondition(condition), reg);
__ movzxbl(reg, reg);
__ bind(&done);
}
@@ -2555,8 +2634,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchRegister;
switch (src.type()) {
case Constant::kInt32: {
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
// TODO(dcarney): don't need scratch in this case.
@@ -2564,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (value == 0) {
__ xorl(dst, dst);
} else {
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
__ movl(dst, Immediate(value, src.rmode()));
} else {
__ movl(dst, Immediate(value));
@@ -2574,11 +2652,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ Set(dst, src.ToInt64());
}
break;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 35acec08dc..aad172788e 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -128,7 +128,6 @@ namespace compiler {
V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
- V(X64TrapMovl) \
V(X64Movsxlq) \
V(X64Movq) \
V(X64Movsd) \
@@ -148,7 +147,10 @@ namespace compiler {
V(X64Xchgw) \
V(X64Xchgl) \
V(X64Int32x4Create) \
- V(X64Int32x4ExtractLane)
+ V(X64Int32x4ExtractLane) \
+ V(X64Int32x4ReplaceLane) \
+ V(X64Int32x4Add) \
+ V(X64Int32x4Sub)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -183,6 +185,8 @@ namespace compiler {
V(M8I) /* [ %r2*8 + K] */ \
V(Root) /* [%root + K] */
+enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
+
} // namespace compiler
} // namespace internal
} // namespace v8
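
How the new enum travels (a sketch, assuming EmitOOLTrapIfNeeded decodes the same field the selector encodes): the instruction selector ORs the protection bit into the instruction's MiscField, and the code generator consults that bit before setting up the out-of-line trap for a memory access.

  // selector side (see VisitLoad / VisitProtectedStore below):
  code |= MiscField::encode(X64MemoryProtection::kProtected);
  // code-generator side (assumed shape):
  if (MiscField::decode(instr->opcode()) == X64MemoryProtection::kProtected) {
    // record __ pc_offset() as a potentially trapping access
  }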
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index ef0c3ad92c..427e58083f 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -125,6 +125,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Inc32:
case kX64Int32x4Create:
case kX64Int32x4ExtractLane:
+ case kX64Int32x4ReplaceLane:
+ case kX64Int32x4Add:
+ case kX64Int32x4Sub:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -155,7 +158,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
case kX64Movl:
- case kX64TrapMovl:
if (instr->HasOutput()) {
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 878e778da0..4c213793f7 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -82,6 +82,15 @@ class X64OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
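+ // Fold a statically-zero base away: [0 + index*scale + disp] addresses
+ // the same memory as [index*scale + disp], and dropping the base frees
+ // a register and unlocks the Mn/MnI addressing modes below.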
+ if (base != nullptr && (index != nullptr || displacement != nullptr)) {
+ if (base->opcode() == IrOpcode::kInt32Constant &&
+ OpParameter<int32_t>(base) == 0) {
+ base = nullptr;
+ } else if (base->opcode() == IrOpcode::kInt64Constant &&
+ OpParameter<int64_t>(base) == 0) {
+ base = nullptr;
+ }
+ }
if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
if (index != nullptr) {
@@ -110,17 +119,22 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
} else {
- DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
- inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
- ? UseNegatedImmediate(displacement)
- : UseImmediate(displacement);
- static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
- kMode_M4I, kMode_M8I};
- mode = kMnI_modes[scale_exponent];
+ if (index == nullptr) {
+ inputs[(*input_count)++] = UseRegister(displacement);
+ mode = kMode_MR;
+ } else {
+ inputs[(*input_count)++] = UseRegister(index);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
+ static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale_exponent];
+ }
} else {
+ inputs[(*input_count)++] = UseRegister(index);
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
kMode_M4, kMode_M8};
mode = kMn_modes[scale_exponent];
@@ -154,10 +168,18 @@ class X64OperandGenerator final : public OperandGenerator {
}
BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
- if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+ if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count);
+ } else if (m.base() == nullptr &&
+ m.displacement_mode() == kPositiveDisplacement) {
+ // The displacement cannot be an immediate, but we can use the
+ // displacement as base instead and still benefit from addressing
+ // modes for the scale.
+ return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
+ nullptr, m.displacement_mode(), inputs,
+ input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -171,7 +193,6 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
-
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
@@ -205,6 +226,39 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
return opcode;
}
+ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ return kX64Movss;
+ case MachineRepresentation::kFloat64:
+ return kX64Movsd;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kX64Movb;
+ case MachineRepresentation::kWord16:
+ return kX64Movw;
+ case MachineRepresentation::kWord32:
+ return kX64Movl;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ return kX64Movq;
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return kArchNop;
+ }
+ UNREACHABLE();
+ return kArchNop;
+}
+
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
@@ -214,33 +268,21 @@ void InstructionSelector::VisitLoad(Node* node) {
ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
-
- ArchOpcode opcode = GetLoadOpcode(load_rep);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- // Add the context parameter as an input.
- inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
- // Add the source position as an input
- inputs[input_count++] = g.UseImmediate(node->InputAt(3));
InstructionCode code = opcode | AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kProtectedLoad) {
+ code |= MiscField::encode(X64MemoryProtection::kProtected);
+ // Add the source position as an input
+ inputs[input_count++] = g.UseImmediate(node->InputAt(2));
+ }
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
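+
+// Note (sketch): a protected word32 load in MRI mode now carries
+//   kX64Movl | AddressingModeField(kMode_MRI) | MiscField(kProtected)
+// with inputs [base register, displacement immediate, source position
+// immediate]. The position immediate is what the out-of-line trap
+// reports for the faulting access, as the removed kX64TrapMovl path
+// did via i.InputInt32(3).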
+
void InstructionSelector::VisitStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -249,10 +291,9 @@ void InstructionSelector::VisitStore(Node* node) {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedPointer(store_rep.representation()));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -287,35 +328,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kX64Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX64Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kX64Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Movw;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Movl;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64:
- opcode = kX64Movq;
- break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
+ ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode =
@@ -330,6 +343,27 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* value = node->InputAt(2);
+ Node* position = node->InputAt(3);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+
+ ArchOpcode opcode = GetStoreOpcode(store_rep);
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ MiscField::encode(X64MemoryProtection::kProtected);
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ inputs[input_count++] = g.UseImmediate(position);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
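+// Input layout produced by VisitProtectedStore above (illustrative,
+// shown for MRI addressing; slots 0-1 vary with the addressing mode):
+//   inputs[0] = base register
+//   inputs[1] = displacement immediate
+//   inputs[2] = value (immediate when it fits, else a register)
+//   inputs[3] = source position immediate
+// The inputs[5] capacity leaves headroom for the three-operand
+// addressing modes (base, index, displacement) plus value and position.
+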
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1650,10 +1684,13 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
+ } else {
+ DCHECK(cont->IsTrap());
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1669,9 +1706,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1687,21 +1727,54 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+ if (hint_node->opcode() == IrOpcode::kLoad) {
+ MachineType hint = LoadRepresentationOf(hint_node->op());
+ if (node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant) {
+ int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(node)
+ : OpParameter<int64_t>(node);
+ if (hint == MachineType::Int8()) {
+ if (constant >= std::numeric_limits<int8_t>::min() &&
+ constant <= std::numeric_limits<int8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint8()) {
+ if (constant >= std::numeric_limits<uint8_t>::min() &&
+ constant <= std::numeric_limits<uint8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int16()) {
+ if (constant >= std::numeric_limits<int16_t>::min() &&
+ constant <= std::numeric_limits<int16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint16()) {
+ if (constant >= std::numeric_limits<uint16_t>::min() &&
+ constant <= std::numeric_limits<uint16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int32()) {
+ return hint;
+ } else if (hint == MachineType::Uint32()) {
+ if (constant >= 0) return hint;
+ }
+ }
+ }
+ return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+ : MachineType::None();
+}
+
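+// Worked example (illustrative):
+//   Word32Equal(Load[Uint8](p), Int32Constant(200))
+//   MachineTypeForNarrow(constant, load) -> Uint8  (200 fits in 8 bits)
+//   MachineTypeForNarrow(load, constant) -> Uint8  (the load's own type)
+// Equal types let TryNarrowOpcodeSize below pick the 8-bit compare, so
+// the selector can emit cmpb against memory instead of a load plus cmpl.
+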
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // Currently, if one of the two operands is not a Load, we don't know what its
- // machine representation is, so we bail out.
- // TODO(epertoso): we can probably get some size information out of immediates
- // and phi nodes.
- if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
- return opcode;
- }
+ // TODO(epertoso): we can probably get some size information out of phi nodes.
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- MachineType left_type = LoadRepresentationOf(left->op());
- MachineType right_type = LoadRepresentationOf(right->op());
+ MachineType left_type = MachineTypeForNarrow(left, right);
+ MachineType right_type = MachineTypeForNarrow(right, left);
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
@@ -1775,11 +1848,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
g.UseRegister(right), cont);
}
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1826,9 +1894,11 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
}
return;
}
@@ -2036,6 +2106,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
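+// TrapIf fires when the tested word is nonzero, hence kNotEqual against
+// zero; TrapUnless inverts the condition. The trap id travels as the
+// instruction's last immediate input (g.UseImmediate(cont->trap_id())),
+// which AssembleArchTrap reads back via InputInt32(InputCount() - 1).
+// Sketch of the eventual code (exact compare chosen by
+// VisitWordCompareZero):
+//   test reg, reg
+//   j not_zero, ool_trap_entry
+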
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
@@ -2347,8 +2430,29 @@ void InstructionSelector::VisitCreateInt32x4(Node* node) {
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
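+
+// DefineSameAsFirst above models the destructive two-operand SSE forms:
+// paddd/psubd overwrite their first operand. Sketch of what the register
+// allocator does when the output cannot stay in input 0's register:
+//   movaps xmm2, xmm0   ; gap move inserted to satisfy same-as-first
+//   paddd  xmm2, xmm1   ; xmm2 = xmm0 + xmm1, input 0 preserved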
// static
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index d2f64e8cf1..5d8594c92b 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -60,9 +60,7 @@ class X87OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
- (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
}
@@ -2130,6 +2128,10 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ UNREACHABLE();
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index a737d1e9e8..9f9e4264a7 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -312,6 +312,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1542,6 +1547,15 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X87OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));