Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 4
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 153
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 31
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 24
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 315
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 25
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 26
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 321
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 749
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 146
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc | 145
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 428
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 498
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.h | 76
-rw-r--r--  deps/v8/src/compiler/ast-loop-assignment-analyzer.cc | 8
-rw-r--r--  deps/v8/src/compiler/basic-block-instrumentor.cc | 4
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 59
-rw-r--r--  deps/v8/src/compiler/branch-elimination.h | 9
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 53
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.h | 3
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 399
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 36
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 8
-rw-r--r--  deps/v8/src/compiler/check-elimination.cc | 76
-rw-r--r--  deps/v8/src/compiler/check-elimination.h | 46
-rw-r--r--  deps/v8/src/compiler/checkpoint-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 52
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 7
-rw-r--r--  deps/v8/src/compiler/code-generator-impl.h | 17
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 191
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 32
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 5
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 1
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc | 1
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 391
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 14
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 17
-rw-r--r--  deps/v8/src/compiler/frame-states.cc | 140
-rw-r--r--  deps/v8/src/compiler/frame-states.h | 70
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc | 2
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 3
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 19
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 3
-rw-r--r--  deps/v8/src/compiler/graph.h | 59
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 275
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 15
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 18
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 87
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 2
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.cc | 25
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.h | 37
-rw-r--r--  deps/v8/src/compiler/instruction-selector-impl.h | 2
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 362
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 21
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 49
-rw-r--r--  deps/v8/src/compiler/instruction.h | 67
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 665
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 16
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 930
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 31
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 5
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 5
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 279
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 6
-rw-r--r--  deps/v8/src/compiler/js-frame-specialization.h | 2
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 182
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 13
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 6
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 2
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 56
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 4
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 25
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 3
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 1344
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 93
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 137
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 67
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 54
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 15
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 529
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 13
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 14
-rw-r--r--  deps/v8/src/compiler/linkage.h | 5
-rw-r--r--  deps/v8/src/compiler/liveness-analyzer.cc | 233
-rw-r--r--  deps/v8/src/compiler/liveness-analyzer.h | 171
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 125
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 48
-rw-r--r--  deps/v8/src/compiler/loop-analysis.h | 1
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 3
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 12
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 91
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 51
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 1029
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 91
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 641
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 1248
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 95
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 729
-rw-r--r--  deps/v8/src/compiler/move-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 3
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 3
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 41
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 5
-rw-r--r--  deps/v8/src/compiler/operator.cc | 10
-rw-r--r--  deps/v8/src/compiler/operator.h | 7
-rw-r--r--  deps/v8/src/compiler/osr.h | 4
-rw-r--r--  deps/v8/src/compiler/pipeline-statistics.cc | 2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 200
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 6
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 189
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc | 1
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 17
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 271
-rw-r--r--  deps/v8/src/compiler/property-access-builder.h | 80
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 14
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 12
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/register-allocator-verifier.cc | 39
-rw-r--r--  deps/v8/src/compiler/register-allocator-verifier.h | 4
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 11
-rw-r--r--  deps/v8/src/compiler/register-allocator.h | 5
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 43
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 1
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc | 145
-rw-r--r--  deps/v8/src/compiler/s390/instruction-scheduler-s390.cc | 1
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 19
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 1
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 6
-rw-r--r--  deps/v8/src/compiler/select-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 260
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 16
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 152
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 172
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 21
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 1
-rw-r--r--  deps/v8/src/compiler/tail-call-optimization.cc | 80
-rw-r--r--  deps/v8/src/compiler/tail-call-optimization.h | 41
-rw-r--r--  deps/v8/src/compiler/type-cache.h | 1
-rw-r--r--  deps/v8/src/compiler/typed-optimization.cc | 36
-rw-r--r--  deps/v8/src/compiler/typed-optimization.h | 5
-rw-r--r--  deps/v8/src/compiler/typer.cc | 109
-rw-r--r--  deps/v8/src/compiler/types.cc | 56
-rw-r--r--  deps/v8/src/compiler/types.h | 59
-rw-r--r--  deps/v8/src/compiler/value-numbering-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 48
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 911
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 93
-rw-r--r--  deps/v8/src/compiler/wasm-linkage.cc | 51
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 324
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 15
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 20
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 70
-rw-r--r--  deps/v8/src/compiler/x87/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/x87/code-generator-x87.cc | 2772
-rw-r--r--  deps/v8/src/compiler/x87/instruction-codes-x87.h | 144
-rw-r--r--  deps/v8/src/compiler/x87/instruction-scheduler-x87.cc | 26
-rw-r--r--  deps/v8/src/compiler/x87/instruction-selector-x87.cc | 1881
164 files changed, 11639 insertions(+), 11593 deletions(-)
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 3a26acc668..b63f5431e2 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -7,6 +7,8 @@ mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
tebbi@chromium.org
+neis@chromium.org
+mvstanton@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
@@ -14,3 +16,5 @@ per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
+
+# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 5fbbdd09da..7712aac131 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -16,15 +16,6 @@ namespace internal {
namespace compiler {
// static
-FieldAccess AccessBuilder::ForExternalDoubleValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForExternalTaggedValue() {
FieldAccess access = {kUntaggedBase, 0,
MaybeHandle<Name>(), MaybeHandle<Map>(),
@@ -64,7 +55,7 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -113,6 +104,28 @@ FieldAccess AccessBuilder::ForJSCollectionTable() {
}
// static
+FieldAccess AccessBuilder::ForJSCollectionIteratorTable() {
+ FieldAccess access = {
+ kTaggedBase, JSCollectionIterator::kTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSCollectionIteratorIndex() {
+ FieldAccess access = {kTaggedBase,
+ JSCollectionIterator::kIndexOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
FieldAccess access = {
kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
@@ -171,6 +184,35 @@ FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
}
// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundTargetFunction() {
+ FieldAccess access = {
+ kTaggedBase, JSBoundFunction::kBoundTargetFunctionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Callable(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundThis() {
+ FieldAccess access = {kTaggedBase, JSBoundFunction::kBoundThisOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSBoundFunctionBoundArguments() {
+ FieldAccess access = {
+ kTaggedBase, JSBoundFunction::kBoundArgumentsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
FieldAccess access = {kTaggedBase, JSGeneratorObject::kContextOffset,
Handle<Name>(), MaybeHandle<Map>(),
@@ -249,16 +291,6 @@ FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
}
// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
- FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
FieldAccess access = {
kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
@@ -278,7 +310,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
type_cache.kJSArrayLengthType,
MachineType::TaggedSigned(),
kFullWriteBarrier};
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
access.type = type_cache.kFixedDoubleArrayLengthType;
access.write_barrier_kind = kNoWriteBarrier;
} else if (IsFastElementsKind(elements_kind)) {
@@ -481,6 +513,14 @@ FieldAccess AccessBuilder::ForMapBitField() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForMapBitField2() {
+ FieldAccess access = {
+ kTaggedBase, Map::kBitField2Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
@@ -691,7 +731,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
MachineType::AnyTagged(),
kFullWriteBarrier};
if (instance_type == JS_ARRAY_TYPE) {
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
access.type = TypeCache::Get().kFixedDoubleArrayLengthType;
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
@@ -836,25 +876,25 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
- case FAST_SMI_ELEMENTS:
+ case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
access.machine_type = MachineType::TaggedSigned();
access.write_barrier_kind = kNoWriteBarrier;
break;
- case FAST_HOLEY_SMI_ELEMENTS:
+ case HOLEY_SMI_ELEMENTS:
access.type = TypeCache::Get().kHoleySmi;
break;
- case FAST_ELEMENTS:
+ case PACKED_ELEMENTS:
access.type = Type::NonInternal();
break;
- case FAST_HOLEY_ELEMENTS:
+ case HOLEY_ELEMENTS:
break;
- case FAST_DOUBLE_ELEMENTS:
+ case PACKED_DOUBLE_ELEMENTS:
access.type = Type::Number();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
break;
- case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case HOLEY_DOUBLE_ELEMENTS:
access.type = Type::NumberOrHole();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
@@ -966,10 +1006,65 @@ FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
}
// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNextTable() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {
+ kTaggedBase, OrderedHashTableBase::kNextTableOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {kTaggedBase,
+ OrderedHashTableBase::kNumberOfBucketsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {
+ kTaggedBase,
+ OrderedHashTableBase::kNumberOfDeletedElementsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForOrderedHashTableBaseNumberOfElements() {
+ // TODO(turbofan): This will be redundant with the HashTableBase
+ // methods above once the hash table unification is done.
+ FieldAccess const access = {kTaggedBase,
+ OrderedHashTableBase::kNumberOfElementsOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ TypeCache::Get().kFixedArrayLengthType,
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
FieldAccess access = {
kTaggedBase,
- FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+ FixedArray::OffsetOfElementAt(SeededNumberDictionary::kMaxNumberKeyIndex),
MaybeHandle<Name>(),
MaybeHandle<Map>(),
Type::Any(),
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index b4c3ed0615..cbe3722a14 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -23,9 +23,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// ===========================================================================
// Access to external values (based on external references).
- // Provides access to a double field identified by an external reference.
- static FieldAccess ForExternalDoubleValue();
-
// Provides access to a tagged field identified by an external reference.
static FieldAccess ForExternalTaggedValue();
@@ -55,6 +52,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSCollecton::table() field.
static FieldAccess ForJSCollectionTable();
+ // Provides access to JSCollectionIterator::table() field.
+ static FieldAccess ForJSCollectionIteratorTable();
+
+ // Provides access to JSCollectionIterator::index() field.
+ static FieldAccess ForJSCollectionIteratorIndex();
+
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -73,6 +76,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSFunction::next_function_link() field.
static FieldAccess ForJSFunctionNextFunctionLink();
+ // Provides access to JSBoundFunction::bound_target_function() field.
+ static FieldAccess ForJSBoundFunctionBoundTargetFunction();
+
+ // Provides access to JSBoundFunction::bound_this() field.
+ static FieldAccess ForJSBoundFunctionBoundThis();
+
+ // Provides access to JSBoundFunction::bound_arguments() field.
+ static FieldAccess ForJSBoundFunctionBoundArguments();
+
// Provides access to JSGeneratorObject::context() field.
static FieldAccess ForJSGeneratorObjectContext();
@@ -97,10 +109,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSAsyncGeneratorObject::queue() field.
static FieldAccess ForJSAsyncGeneratorObjectQueue();
- // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
- // field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
-
// Provides access to JSAsyncGeneratorObject::awaited_promise() field.
static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
@@ -161,6 +169,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to Map::bit_field() byte.
static FieldAccess ForMapBitField();
+ // Provides access to Map::bit_field2() byte.
+ static FieldAccess ForMapBitField2();
+
// Provides access to Map::bit_field3() field.
static FieldAccess ForMapBitField3();
@@ -274,6 +285,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForHashTableBaseNumberOfDeletedElement();
static FieldAccess ForHashTableBaseCapacity();
+ // Provides access to OrderedHashTableBase fields.
+ static FieldAccess ForOrderedHashTableBaseNextTable();
+ static FieldAccess ForOrderedHashTableBaseNumberOfBuckets();
+ static FieldAccess ForOrderedHashTableBaseNumberOfElements();
+ static FieldAccess ForOrderedHashTableBaseNumberOfDeletedElements();
+
// Provides access to Dictionary fields.
static FieldAccess ForDictionaryMaxNumberKey();
static FieldAccess ForDictionaryNextEnumerationIndex();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 196bf9e896..f6705cc294 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -56,7 +56,6 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os << "StoreInLiteral";
}
UNREACHABLE();
- return os;
}
ElementAccessInfo::ElementAccessInfo() {}
@@ -213,7 +212,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
UNREACHABLE();
- return false;
}
AccessInfoFactory::AccessInfoFactory(CompilationDependencies* dependencies,
@@ -411,9 +409,15 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
isolate());
if (!accessor->IsJSFunction()) {
CallOptimization optimization(accessor);
- if (!optimization.is_simple_api_call()) {
- return false;
- }
+ if (!optimization.is_simple_api_call()) return false;
+ CallOptimization::HolderLookup lookup;
+ holder =
+ optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return false;
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
+ holder.is_null());
+ DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound,
+ !holder.is_null());
if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
if (access_mode == AccessMode::kLoad) {
@@ -433,7 +437,6 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
UNREACHABLE();
- return false;
}
// Don't search on the prototype chain for special indices in case of
@@ -516,14 +519,13 @@ namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
ElementsKind that_kind) {
- if (IsHoleyElementsKind(this_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(this_kind)) {
that_kind = GetHoleyElementsKind(that_kind);
- } else if (IsHoleyElementsKind(that_kind)) {
+ } else if (IsHoleyOrDictionaryElementsKind(that_kind)) {
this_kind = GetHoleyElementsKind(this_kind);
}
if (this_kind == that_kind) return Just(this_kind);
- if (IsFastDoubleElementsKind(that_kind) ==
- IsFastDoubleElementsKind(this_kind)) {
+ if (IsDoubleElementsKind(that_kind) == IsDoubleElementsKind(this_kind)) {
if (IsMoreGeneralElementsKindTransition(that_kind, this_kind)) {
return Just(this_kind);
}
@@ -575,7 +577,7 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
// elements, a smi in the range [0, FixedArray::kMaxLength]
// in case of other fast elements, and [0, kMaxUInt32] in
// case of other arrays.
- if (IsFastDoubleElementsKind(map->elements_kind())) {
+ if (IsDoubleElementsKind(map->elements_kind())) {
field_type = type_cache_.kFixedDoubleArrayLengthType;
field_representation = MachineRepresentation::kTaggedSigned;
} else if (IsFastElementsKind(map->elements_kind())) {
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 953b6a15ea..5124491695 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -11,14 +11,15 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/double.h"
+#include "src/float.h"
#include "src/heap/heap-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchReg r9
@@ -40,7 +41,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return LeaveCC;
}
UNREACHABLE();
- return LeaveCC;
}
Operand InputImmediate(size_t index) {
@@ -49,11 +49,9 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
@@ -61,7 +59,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
Operand InputOperand2(size_t first_index) {
@@ -93,7 +90,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand InputOffset(size_t* first_index) {
@@ -122,7 +118,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand InputOffset(size_t first_index = 0) {
@@ -150,7 +145,7 @@ class OutOfLineLoadFloat final : public OutOfLineCode {
void Generate() final {
// Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
- __ vmov(result_, -1.0f);
+ __ vmov(result_, Float32(-1.0f));
__ vsqrt(result_, result_);
}
@@ -165,7 +160,7 @@ class OutOfLineLoadDouble final : public OutOfLineCode {
void Generate() final {
// Compute sqrt(-1.0), which results in a quiet double-precision NaN.
- __ vmov(result_, -1.0);
+ __ vmov(result_, Double(-1.0));
__ vsqrt(result_, result_);
}
@@ -201,7 +196,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
Register value, Register scratch0, Register scratch1,
@@ -216,7 +212,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -235,15 +232,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ Push(lr);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset());
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (index_.is(no_reg)) {
__ add(scratch1_, object_, Operand(index_immediate_));
} else {
DCHECK_EQ(0, index_immediate_);
__ add(scratch1_, object_, Operand(index_));
}
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -260,6 +257,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
template <typename T>
@@ -344,15 +342,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
-}
-
-int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
- // If unary shuffle, table is src0 (2 d-registers).
- if (src0.is(src1)) return 2;
- // Binary shuffle, table is src0, src1. They must be consecutive
- DCHECK_EQ(src0.code() + 1, src1.code());
- return 4; // 4 d-registers.
}
} // namespace
@@ -479,12 +468,12 @@ int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
@@ -494,11 +483,11 @@ int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 1, kScratchReg); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
@@ -580,20 +569,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->push((*pending_pushes)[0]);
+ tasm->push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -604,18 +593,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -623,15 +612,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
+ tasm->sub(sp, sp, Operand(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->add(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -654,20 +643,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ ldr(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
@@ -675,15 +664,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -697,10 +686,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ add(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -713,14 +706,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ add(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -776,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ __ PrepareCallCFunction(num_parameters);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -850,7 +847,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -945,8 +943,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ vmov(d0, d2);
break;
}
@@ -983,7 +981,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(2), i.OutputSBit());
break;
case kArmMls: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
i.InputRegister(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1007,13 +1005,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(1), i.OutputSBit());
break;
case kArmSdiv: {
- CpuFeatureScope scope(masm(), SUDIV);
+ CpuFeatureScope scope(tasm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUdiv: {
- CpuFeatureScope scope(masm(), SUDIV);
+ CpuFeatureScope scope(tasm(), SUDIV);
__ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1041,20 +1039,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.OutputSBit());
break;
case kArmBfc: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmUbfx: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmSbfx: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
i.InputInt8(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1097,7 +1095,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmRbit: {
- CpuFeatureScope scope(masm(), ARMv7);
+ CpuFeatureScope scope(tasm(), ARMv7);
__ rbit(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -1288,12 +1286,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
- __ PrepareCallCFunction(0, 2, kScratchReg);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ __ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1309,47 +1307,47 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintmF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintpF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintpF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintzF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintzF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintaF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kArmVrintnF32: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
break;
}
case kArmVrintnF64: {
- CpuFeatureScope scope(masm(), ARMv8);
+ CpuFeatureScope scope(tasm(), ARMv8);
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
@@ -1797,14 +1795,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI32x4LtS: {
- __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GtS: {
+ __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI32x4LeS: {
- __ vcge(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GeS: {
+ __ vcge(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI32x4UConvertF32x4: {
@@ -1836,14 +1834,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI32x4LtU: {
- __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GtU: {
+ __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI32x4LeU: {
- __ vcge(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI32x4GeU: {
+ __ vcge(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI16x8Splat: {
@@ -1937,14 +1935,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI16x8LtS: {
- __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GtS: {
+ __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI16x8LeS: {
- __ vcge(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GeS: {
+ __ vcge(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI16x8UConvertI8x16Low: {
@@ -1985,14 +1983,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI16x8LtU: {
- __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GtU: {
+ __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI16x8LeU: {
- __ vcge(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI16x8GeU: {
+ __ vcge(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI8x16Splat: {
@@ -2072,14 +2070,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmvn(dst, dst);
break;
}
- case kArmI8x16LtS: {
- __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GtS: {
+ __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI8x16LeS: {
- __ vcge(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GeS: {
+ __ vcge(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmI8x16ShrU: {
@@ -2110,14 +2108,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmI8x16LtU: {
- __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GtU: {
+ __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kArmI8x16LeU: {
- __ vcge(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(0));
+ case kArmI8x16GeU: {
+ __ vcge(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kArmS128Zero: {
@@ -2145,10 +2143,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmS128Select: {
- // vbsl clobbers the mask input so make sure it was DefineSameAsFirst.
- DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
- __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ vbsl(dst, i.InputSimd128Register(1), i.InputSimd128Register(2));
break;
}
case kArmS32x4ZipLeft: {
@@ -2289,39 +2286,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
break;
}
- case kArmS16x8Shuffle: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- DwVfpRegister table_base = src0.low();
- int table_size = GetVtblTableSize(src0, src1);
- // Convert the shuffle lane masks to byte masks in kScratchQuadReg.
- int scratch_s_base = kScratchQuadReg.code() * 4;
- for (int j = 0; j < 2; j++) {
- int32_t four_lanes = i.InputInt32(2 + j);
- for (int k = 0; k < 2; k++) {
- uint8_t w0 = (four_lanes & 0xF) * kShortSize;
- four_lanes >>= 8;
- uint8_t w1 = (four_lanes & 0xF) * kShortSize;
- four_lanes >>= 8;
- int32_t mask = w0 | ((w0 + 1) << 8) | (w1 << 16) | ((w1 + 1) << 24);
- // Ensure byte indices are in [0, 31] so masks are never NaNs.
- four_lanes &= 0x1F1F1F1F;
- __ vmov(SwVfpRegister::from_code(scratch_s_base + 2 * j + k),
- bit_cast<float>(mask));
- }
- }
- NeonListOperand table(table_base, table_size);
- if (!dst.is(src0) && !dst.is(src1)) {
- __ vtbl(dst.low(), table, kScratchQuadReg.low());
- __ vtbl(dst.high(), table, kScratchQuadReg.high());
- } else {
- __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
- __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
- __ vmov(dst, kScratchQuadReg);
- }
- break;
- }
case kArmS8x16ZipLeft: {
Simd128Register dst = i.OutputSimd128Register(),
src1 = i.InputSimd128Register(1);
@@ -2386,15 +2350,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
DwVfpRegister table_base = src0.low();
- int table_size = GetVtblTableSize(src0, src1);
+ // If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
+ // src1. They must be consecutive.
+ int table_size = src0.is(src1) ? 2 : 4;
+ DCHECK_IMPLIES(!src0.is(src1), src0.code() + 1 == src1.code());
// The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
int scratch_s_base = kScratchQuadReg.code() * 4;
for (int j = 0; j < 4; j++) {
- int32_t four_lanes = i.InputInt32(2 + j);
+ uint32_t four_lanes = i.InputUint32(2 + j);
// Ensure byte indices are in [0, 31] so masks are never NaNs.
four_lanes &= 0x1F1F1F1F;
__ vmov(SwVfpRegister::from_code(scratch_s_base + j),
- bit_cast<float>(four_lanes));
+ Float32(four_lanes));
}
NeonListOperand table(table_base, table_size);
if (!dst.is(src0) && !dst.is(src1)) {
@@ -2669,15 +2636,15 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// Therefore we emit a call to C here instead of a call to the runtime.
// We use the context register as the scratch register, because we do
// not have a context here.
- __ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ PrepareCallCFunction(0, 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2750,12 +2717,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2821,7 +2788,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
@@ -2840,7 +2807,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
__ Move(kScratchReg,
Operand(ExternalReference::address_of_real_stack_limit(
- isolate())));
+ __ isolate())));
__ ldr(kScratchReg, MemOperand(kScratchReg));
__ add(kScratchReg, kScratchReg,
Operand(shrink_slots * kPointerSize));
@@ -2855,7 +2822,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2937,7 +2904,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2979,12 +2946,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -3011,7 +2976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ str(ip, dst);
} else {
SwVfpRegister dst = g.ToFloatRegister(destination);
- __ vmov(dst, src.ToFloat32());
+ __ vmov(dst, Float32(src.ToFloat32AsInt()));
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
@@ -3078,7 +3043,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ vld1(Neon8, NeonListOperand(dst.low(), 2),
NeonMemOperand(kScratchReg));
}
- } else if (rep == MachineRepresentation::kFloat64) {
+ } else {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
@@ -3235,10 +3200,10 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block literal pool emission for duration of padding.
- v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ v8::internal::Assembler::BlockConstPoolScope block_const_pool(tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index db3e515c40..00a4154ad3 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -160,16 +160,16 @@ namespace compiler {
V(ArmI32x4MaxS) \
V(ArmI32x4Eq) \
V(ArmI32x4Ne) \
- V(ArmI32x4LtS) \
- V(ArmI32x4LeS) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
V(ArmI32x4UConvertF32x4) \
V(ArmI32x4UConvertI16x8Low) \
V(ArmI32x4UConvertI16x8High) \
V(ArmI32x4ShrU) \
V(ArmI32x4MinU) \
V(ArmI32x4MaxU) \
- V(ArmI32x4LtU) \
- V(ArmI32x4LeU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
V(ArmI16x8Splat) \
V(ArmI16x8ExtractLane) \
V(ArmI16x8ReplaceLane) \
@@ -189,8 +189,8 @@ namespace compiler {
V(ArmI16x8MaxS) \
V(ArmI16x8Eq) \
V(ArmI16x8Ne) \
- V(ArmI16x8LtS) \
- V(ArmI16x8LeS) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
V(ArmI16x8UConvertI8x16Low) \
V(ArmI16x8UConvertI8x16High) \
V(ArmI16x8ShrU) \
@@ -199,8 +199,8 @@ namespace compiler {
V(ArmI16x8SubSaturateU) \
V(ArmI16x8MinU) \
V(ArmI16x8MaxU) \
- V(ArmI16x8LtU) \
- V(ArmI16x8LeU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
V(ArmI8x16Splat) \
V(ArmI8x16ExtractLane) \
V(ArmI8x16ReplaceLane) \
@@ -217,16 +217,16 @@ namespace compiler {
V(ArmI8x16MaxS) \
V(ArmI8x16Eq) \
V(ArmI8x16Ne) \
- V(ArmI8x16LtS) \
- V(ArmI8x16LeS) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
V(ArmI8x16ShrU) \
V(ArmI8x16UConvertI16x8) \
V(ArmI8x16AddSaturateU) \
V(ArmI8x16SubSaturateU) \
V(ArmI8x16MinU) \
V(ArmI8x16MaxU) \
- V(ArmI8x16LtU) \
- V(ArmI8x16LeU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
V(ArmS128Zero) \
V(ArmS128And) \
V(ArmS128Or) \
@@ -246,7 +246,6 @@ namespace compiler {
V(ArmS16x8UnzipRight) \
V(ArmS16x8TransposeLeft) \
V(ArmS16x8TransposeRight) \
- V(ArmS16x8Shuffle) \
V(ArmS8x16ZipLeft) \
V(ArmS8x16ZipRight) \
V(ArmS8x16UnzipLeft) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 549752d09e..7b1f1b30f3 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -144,16 +144,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4MaxS:
case kArmI32x4Eq:
case kArmI32x4Ne:
- case kArmI32x4LtS:
- case kArmI32x4LeS:
+ case kArmI32x4GtS:
+ case kArmI32x4GeS:
case kArmI32x4UConvertF32x4:
case kArmI32x4UConvertI16x8Low:
case kArmI32x4UConvertI16x8High:
case kArmI32x4ShrU:
case kArmI32x4MinU:
case kArmI32x4MaxU:
- case kArmI32x4LtU:
- case kArmI32x4LeU:
+ case kArmI32x4GtU:
+ case kArmI32x4GeU:
case kArmI16x8Splat:
case kArmI16x8ExtractLane:
case kArmI16x8ReplaceLane:
@@ -173,8 +173,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8MaxS:
case kArmI16x8Eq:
case kArmI16x8Ne:
- case kArmI16x8LtS:
- case kArmI16x8LeS:
+ case kArmI16x8GtS:
+ case kArmI16x8GeS:
case kArmI16x8UConvertI8x16Low:
case kArmI16x8UConvertI8x16High:
case kArmI16x8ShrU:
@@ -183,8 +183,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8SubSaturateU:
case kArmI16x8MinU:
case kArmI16x8MaxU:
- case kArmI16x8LtU:
- case kArmI16x8LeU:
+ case kArmI16x8GtU:
+ case kArmI16x8GeU:
case kArmI8x16Splat:
case kArmI8x16ExtractLane:
case kArmI8x16ReplaceLane:
@@ -201,16 +201,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI8x16MaxS:
case kArmI8x16Eq:
case kArmI8x16Ne:
- case kArmI8x16LtS:
- case kArmI8x16LeS:
+ case kArmI8x16GtS:
+ case kArmI8x16GeS:
case kArmI8x16UConvertI16x8:
case kArmI8x16AddSaturateU:
case kArmI8x16SubSaturateU:
case kArmI8x16ShrU:
case kArmI8x16MinU:
case kArmI8x16MaxU:
- case kArmI8x16LtU:
- case kArmI8x16LeU:
+ case kArmI8x16GtU:
+ case kArmI8x16GeU:
case kArmS128Zero:
case kArmS128And:
case kArmS128Or:
@@ -230,7 +230,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS16x8UnzipRight:
case kArmS16x8TransposeLeft:
case kArmS16x8TransposeRight:
- case kArmS16x8Shuffle:
case kArmS8x16ZipLeft:
case kArmS8x16ZipRight:
case kArmS8x16UnzipLeft:
@@ -283,7 +282,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
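Illustrative note (not part of the patch): the kArm*LtS/LeS to GtS/GeS renames in this file and in instruction-codes-arm.h reflect that the underlying NEON compares are greater-than / greater-or-equal; a lane-wise less-than against another vector is produced by emitting the greater-than form with the operands swapped. A small check of that equivalence, with made-up lane values:

// Sketch only: a lane-wise "a < b" mask equals "b > a", which is why the
// selector can emit the Gt/Ge opcodes with swapped operands.
#include <cassert>
#include <cstdint>

int main() {
  int32_t a[4] = {1, -2, 3, 4};
  int32_t b[4] = {2, -2, 1, 8};
  for (int i = 0; i < 4; ++i) {
    uint32_t lt = a[i] < b[i] ? 0xFFFFFFFFu : 0;          // desired LtS lane
    uint32_t gt_swapped = b[i] > a[i] ? 0xFFFFFFFFu : 0;  // emitted GtS lane
    assert(lt == gt_swapped);
  }
  return 0;
}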
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 8983c9b115..3840ae8158 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -112,15 +112,6 @@ void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
g.UseRegister(node->InputAt(1)));
}
-void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
- ArmOperandGenerator g(selector);
- // Use DefineSameAsFirst for ternary ops that clobber their first input,
- // e.g. the NEON vbsl instruction.
- selector->Emit(
- opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-}
-
void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node);
@@ -459,9 +450,6 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArmVld1S128;
break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -549,9 +537,6 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArmVst1S128;
break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -758,9 +743,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -805,9 +787,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -882,11 +861,18 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t const shift = mshr.right().Value();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
- ((value == 0xff) || (value == 0xffff))) {
- // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
+ (value == 0xff)) {
+ // Merge SHR into AND by emitting a UXTB instruction with a
+ // bytewise rotation.
+ Emit(kArmUxtb, g.DefineAsRegister(m.node()),
+ g.UseRegister(mshr.left().node()),
+ g.TempImmediate(mshr.right().Value()));
+ return;
+ } else if (((shift == 8) || (shift == 16)) && (value == 0xffff)) {
+ // Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
- Emit((value == 0xff) ? kArmUxtb : kArmUxth,
- g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
+ Emit(kArmUxth, g.DefineAsRegister(m.node()),
+ g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
return;
} else if (IsSupported(ARMv7) && (width != 0) &&
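Illustrative note (not part of the patch): the rewritten Word32And case splits the old combined UXTB/UXTH path because the rotate-and-extend identity only holds for UXTH with shifts of 8 or 16; with a 24-bit rotation the low byte wraps into the extracted halfword. A standalone check of both identities (rotr is a local helper, not a V8 function):

// Sketch only: verifies the rotate-and-extend identity the selector relies on.
#include <cassert>
#include <cstdint>
#include <initializer_list>

static uint32_t rotr(uint32_t x, int s) {
  return s == 0 ? x : (x >> s) | (x << (32 - s));
}

int main() {
  uint32_t x = 0x12345678u;
  // UXTB rd, rm, ror #s  ==  (x >> s) & 0xff, for s = 8, 16, 24.
  for (int s : {8, 16, 24}) assert((rotr(x, s) & 0xffu) == ((x >> s) & 0xffu));
  // UXTH rd, rm, ror #s  ==  (x >> s) & 0xffff, but only for s = 8 and 16.
  for (int s : {8, 16}) assert((rotr(x, s) & 0xffffu) == ((x >> s) & 0xffffu));
  // For s = 24 the rotation wraps the low byte into the halfword, so the
  // identity breaks; that is why the 0xffff case no longer accepts shift 24.
  assert((rotr(x, 24) & 0xffffu) != ((x >> 24) & 0xffffu));
  return 0;
}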
@@ -1384,14 +1370,14 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
+ if (value < kMaxInt && base::bits::IsPowerOfTwo(value + 1)) {
Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
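Illustrative note (not part of the patch): the only change above is the move from IsPowerOfTwo32 to the width-generic IsPowerOfTwo, but the strength reduction it guards is worth stating: multiplying by 2^k + 1 becomes an add of a shifted operand, and multiplying by 2^k - 1 becomes a reverse-subtract. A quick numeric check:

// Sketch only: verifies the algebra behind the kArmAdd / kArmRsb rewrites.
#include <cassert>
#include <cstdint>

int main() {
  int32_t x = 12345;
  int k = 3;  // so the constants are 9 and 7
  // x * (2^k + 1) == x + (x << k)   -> kArmAdd with Operand2_R_LSL_I
  assert(x * ((1 << k) + 1) == x + (x << k));
  // x * (2^k - 1) == (x << k) - x   -> kArmRsb with Operand2_R_LSL_I
  assert(x * ((1 << k) - 1) == (x << k) - x);
  return 0;
}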
@@ -1728,7 +1714,6 @@ FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -2043,6 +2028,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2050,7 +2036,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
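Illustrative note (not part of the patch): the new kMaxTableSwitchValueRange bound stops ArchTableSwitch from being picked for very sparse switches, since the jump table needs one slot per value in the range; the space/time heuristic itself is unchanged. The decision, restated as a standalone predicate with the same constants:

// Sketch only: the table-vs-lookup switch heuristic from the hunk above.
#include <cstddef>
#include <cstdint>
#include <limits>

bool UseTableSwitch(size_t case_count, size_t value_range, int32_t min_value) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;  // one jump-table slot per value
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  // Dense small switch: table wins. Huge sparse range: lookup switch wins.
  return UseTableSwitch(8, 8, 0) && !UseTableSwitch(8, 1u << 20, 0) ? 0 : 1;
}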
@@ -2391,15 +2378,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16)
#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
-#define SIMD_ZERO_OP_LIST(V) \
- V(S128Zero) \
- V(S1x4Zero) \
- V(S1x8Zero) \
- V(S1x16Zero)
+ V(32x4, 4) \
+ V(16x8, 8) \
+ V(8x16, 16)
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4, kArmF32x4SConvertI32x4) \
@@ -2422,13 +2403,10 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8UConvertI8x16High, kArmI16x8UConvertI8x16High) \
V(I8x16Neg, kArmI8x16Neg) \
V(S128Not, kArmS128Not) \
- V(S1x4Not, kArmS128Not) \
V(S1x4AnyTrue, kArmS1x4AnyTrue) \
V(S1x4AllTrue, kArmS1x4AllTrue) \
- V(S1x8Not, kArmS128Not) \
V(S1x8AnyTrue, kArmS1x8AnyTrue) \
V(S1x8AllTrue, kArmS1x8AllTrue) \
- V(S1x16Not, kArmS128Not) \
V(S1x16AnyTrue, kArmS1x16AnyTrue) \
V(S1x16AllTrue, kArmS1x16AllTrue)
@@ -2462,12 +2440,12 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MaxS, kArmI32x4MaxS) \
V(I32x4Eq, kArmI32x4Eq) \
V(I32x4Ne, kArmI32x4Ne) \
- V(I32x4LtS, kArmI32x4LtS) \
- V(I32x4LeS, kArmI32x4LeS) \
+ V(I32x4GtS, kArmI32x4GtS) \
+ V(I32x4GeS, kArmI32x4GeS) \
V(I32x4MinU, kArmI32x4MinU) \
V(I32x4MaxU, kArmI32x4MaxU) \
- V(I32x4LtU, kArmI32x4LtU) \
- V(I32x4LeU, kArmI32x4LeU) \
+ V(I32x4GtU, kArmI32x4GtU) \
+ V(I32x4GeU, kArmI32x4GeU) \
V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
V(I16x8Add, kArmI16x8Add) \
V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
@@ -2479,15 +2457,15 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8MaxS, kArmI16x8MaxS) \
V(I16x8Eq, kArmI16x8Eq) \
V(I16x8Ne, kArmI16x8Ne) \
- V(I16x8LtS, kArmI16x8LtS) \
- V(I16x8LeS, kArmI16x8LeS) \
+ V(I16x8GtS, kArmI16x8GtS) \
+ V(I16x8GeS, kArmI16x8GeS) \
V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
V(I16x8MinU, kArmI16x8MinU) \
V(I16x8MaxU, kArmI16x8MaxU) \
- V(I16x8LtU, kArmI16x8LtU) \
- V(I16x8LeU, kArmI16x8LeU) \
+ V(I16x8GtU, kArmI16x8GtU) \
+ V(I16x8GeU, kArmI16x8GeU) \
V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
V(I8x16Add, kArmI8x16Add) \
V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
@@ -2498,27 +2476,23 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MaxS, kArmI8x16MaxS) \
V(I8x16Eq, kArmI8x16Eq) \
V(I8x16Ne, kArmI8x16Ne) \
- V(I8x16LtS, kArmI8x16LtS) \
- V(I8x16LeS, kArmI8x16LeS) \
+ V(I8x16GtS, kArmI8x16GtS) \
+ V(I8x16GeS, kArmI8x16GeS) \
V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
V(I8x16MinU, kArmI8x16MinU) \
V(I8x16MaxU, kArmI8x16MaxU) \
- V(I8x16LtU, kArmI8x16LtU) \
- V(I8x16LeU, kArmI8x16LeU) \
+ V(I8x16GtU, kArmI8x16GtU) \
+ V(I8x16GeU, kArmI8x16GeU) \
V(S128And, kArmS128And) \
V(S128Or, kArmS128Or) \
- V(S128Xor, kArmS128Xor) \
- V(S1x4And, kArmS128And) \
- V(S1x4Or, kArmS128Or) \
- V(S1x4Xor, kArmS128Xor) \
- V(S1x8And, kArmS128And) \
- V(S1x8Or, kArmS128Or) \
- V(S1x8Xor, kArmS128Xor) \
- V(S1x16And, kArmS128And) \
- V(S1x16Or, kArmS128Or) \
- V(S1x16Xor, kArmS128Xor)
+ V(S128Xor, kArmS128Xor)
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
#define SIMD_VISIT_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -2541,14 +2515,6 @@ SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE
-#define SIMD_VISIT_ZERO_OP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- ArmOperandGenerator g(this); \
- Emit(kArmS128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
- }
-SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
-#undef SIMD_VISIT_ZERO_OP
-
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
VisitRR(this, instruction, node); \
@@ -2570,40 +2536,79 @@ SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
-#define SIMD_VISIT_SELECT_OP(format) \
- void InstructionSelector::VisitS##format##Select(Node* node) { \
- VisitRRRR(this, kArmS128Select, node); \
- }
-SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
-#undef SIMD_VISIT_SELECT_OP
+void InstructionSelector::VisitS128Select(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmS128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
namespace {
-template <int LANES>
+
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
+}
+
+// Tries to match byte shuffle to concatenate (vext) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
+}
+
struct ShuffleEntry {
- uint8_t shuffle[LANES];
+ uint8_t shuffle[kSimd128Size];
ArchOpcode opcode;
};
-static const ShuffleEntry<4> arch_s32x4_shuffles[] = {
- {{0, 4, 1, 5}, kArmS32x4ZipLeft},
- {{2, 6, 3, 7}, kArmS32x4ZipRight},
- {{0, 2, 4, 6}, kArmS32x4UnzipLeft},
- {{1, 3, 5, 7}, kArmS32x4UnzipRight},
- {{0, 4, 2, 6}, kArmS32x4TransposeLeft},
- {{1, 5, 3, 7}, kArmS32x4TransposeRight},
- {{1, 0, 3, 2}, kArmS32x2Reverse}};
-
-static const ShuffleEntry<8> arch_s16x8_shuffles[] = {
- {{0, 8, 1, 9, 2, 10, 3, 11}, kArmS16x8ZipLeft},
- {{4, 12, 5, 13, 6, 14, 7, 15}, kArmS16x8ZipRight},
- {{0, 2, 4, 6, 8, 10, 12, 14}, kArmS16x8UnzipLeft},
- {{1, 3, 5, 7, 9, 11, 13, 15}, kArmS16x8UnzipRight},
- {{0, 8, 2, 10, 4, 12, 6, 14}, kArmS16x8TransposeLeft},
- {{1, 9, 3, 11, 5, 13, 7, 15}, kArmS16x8TransposeRight},
- {{3, 2, 1, 0, 7, 6, 5, 4}, kArmS16x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6}, kArmS16x2Reverse}};
-
-static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kArmS32x4ZipLeft},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArmS32x4ZipRight},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kArmS32x4UnzipLeft},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kArmS32x4UnzipRight},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kArmS32x4TransposeLeft},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArmS32x4TransposeRight},
+ {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}, kArmS32x2Reverse},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kArmS16x8ZipLeft},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kArmS16x8ZipRight},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kArmS16x8UnzipLeft},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kArmS16x8UnzipRight},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kArmS16x8TransposeLeft},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kArmS16x8TransposeRight},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kArmS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kArmS16x2Reverse},
+
{{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
kArmS8x16ZipLeft},
{{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
@@ -2620,45 +2625,28 @@ static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
{{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
{{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
-// Use a non-shuffle opcode to signal no match.
-static const ArchOpcode kNoShuffle = kArmS128Not;
-
-template <int LANES>
-ArchOpcode TryMatchArchShuffle(const uint8_t* shuffle,
- const ShuffleEntry<LANES>* table,
- size_t num_entries, uint8_t mask) {
- for (size_t i = 0; i < num_entries; i++) {
- const ShuffleEntry<LANES>& entry = table[i];
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
int j = 0;
- for (; j < LANES; j++) {
+ for (; j < kSimd128Size; ++j) {
if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
break;
}
}
- if (j == LANES) return entry.opcode;
- }
- return kNoShuffle;
-}
-
-// Returns the bias if shuffle is a concatenation, 0 otherwise.
-template <int LANES>
-uint8_t TryMatchConcat(const uint8_t* shuffle, uint8_t mask) {
- uint8_t start = shuffle[0];
- int i = 1;
- for (; i < LANES - start; i++) {
- if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return 0;
- }
- uint8_t wrap = LANES;
- for (; i < LANES; i++, wrap++) {
- if ((shuffle[i] & mask) != (wrap & mask)) return 0;
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
}
- return start;
+ return false;
}
// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
// will ignore the high bit of indices in some cases.
-uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
- int num_lanes) {
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
const uint8_t* shuffle = OpParameter<uint8_t*>(node);
uint8_t mask = 0xff;
// If shuffle is unary, set 'mask' to ignore the high bit of the indices.
@@ -2666,12 +2654,12 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
if (selector->GetVirtualRegister(node->InputAt(0)) ==
selector->GetVirtualRegister(node->InputAt(1))) {
// unary, src0 == src1.
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
} else {
bool src0_is_used = false;
bool src1_is_used = false;
- for (int i = 0; i < num_lanes; i++) {
- if (shuffle[i] < num_lanes) {
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
src0_is_used = true;
} else {
src1_is_used = true;
@@ -2679,10 +2667,10 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
}
if (src0_is_used && !src1_is_used) {
node->ReplaceInput(1, node->InputAt(0));
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
} else if (src1_is_used && !src0_is_used) {
node->ReplaceInput(0, node->InputAt(1));
- mask = num_lanes - 1;
+ mask = kUnaryShuffleMask;
}
}
return mask;
@@ -2690,7 +2678,7 @@ uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
int32_t result = 0;
- for (int i = 3; i >= 0; i--) {
+ for (int i = 3; i >= 0; --i) {
result <<= 8;
result |= shuffle[i] & mask;
}
@@ -2711,70 +2699,29 @@ void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
} // namespace
-void InstructionSelector::VisitS32x4Shuffle(Node* node) {
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 4);
- ArchOpcode opcode = TryMatchArchShuffle<4>(
- shuffle, arch_s32x4_shuffles, arraysize(arch_s32x4_shuffles), mask);
- if (opcode != kNoShuffle) {
- VisitRRRShuffle(this, opcode, node);
- return;
- }
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
ArmOperandGenerator g(this);
- uint8_t lanes = TryMatchConcat<4>(shuffle, mask);
- if (lanes != 0) {
- Emit(kArmS8x16Concat, g.DefineAsRegister(node),
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(lanes * 4));
- return;
- }
- Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseImmediate(Pack4Lanes(shuffle, mask)));
-}
-
-void InstructionSelector::VisitS16x8Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 8);
- ArchOpcode opcode = TryMatchArchShuffle<8>(
- shuffle, arch_s16x8_shuffles, arraysize(arch_s16x8_shuffles), mask);
- if (opcode != kNoShuffle) {
- VisitRRRShuffle(this, opcode, node);
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
return;
}
- ArmOperandGenerator g(this);
- Node* input0 = node->InputAt(0);
- Node* input1 = node->InputAt(1);
- uint8_t lanes = TryMatchConcat<8>(shuffle, mask);
- if (lanes != 0) {
- Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(lanes * 2));
- return;
- }
- // Code generator uses vtbl, arrange sources to form a valid lookup table.
- InstructionOperand src0, src1;
- ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
- Emit(kArmS16x8Shuffle, g.DefineAsRegister(node), src0, src1,
- g.UseImmediate(Pack4Lanes(shuffle, mask)),
- g.UseImmediate(Pack4Lanes(shuffle + 4, mask)));
-}
-
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
- const uint8_t* shuffle = OpParameter<uint8_t*>(node);
- uint8_t mask = CanonicalizeShuffle(this, node, 16);
- ArchOpcode opcode = TryMatchArchShuffle<16>(
- shuffle, arch_s8x16_shuffles, arraysize(arch_s8x16_shuffles), mask);
- if (opcode != kNoShuffle) {
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
VisitRRRShuffle(this, opcode, node);
return;
}
- ArmOperandGenerator g(this);
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t lanes = TryMatchConcat<16>(shuffle, mask);
- if (lanes != 0) {
+ uint8_t offset;
+ if (TryMatchConcat(shuffle, mask, &offset)) {
Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1), g.UseImmediate(lanes));
+ g.UseRegister(input1), g.UseImmediate(offset));
return;
}
// Code generator uses vtbl, arrange sources to form a valid lookup table.
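Illustrative note (not part of the patch): with VisitS32x4Shuffle and VisitS16x8Shuffle removed, every shuffle reaches VisitS8x16Shuffle as 16 byte indices; it first tries to recognise a 32x4 word shuffle, then one of the tabulated NEON permutes, then a vext-style concatenation, and only then falls back to vtbl. A standalone restatement of the two new matchers, assuming kSimd128Size is 16, exercised on two small inputs:

// Sketch only: standalone versions of TryMatch32x4Shuffle and TryMatchConcat.
#include <cassert>
#include <cstdint>

static const int kSimd128Size = 16;

// A byte shuffle is a word shuffle if each group of 4 indices selects a
// whole, aligned 4-byte lane in order.
bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
  for (int i = 0; i < 4; ++i) {
    if (shuffle[i * 4] % 4 != 0) return false;
    for (int j = 1; j < 4; ++j) {
      if (shuffle[i * 4 + j] != shuffle[i * 4 + j - 1] + 1) return false;
    }
    shuffle32x4[i] = shuffle[i * 4] / 4;
  }
  return true;
}

// A shuffle is a concatenation (vext #offset) if the indices are consecutive
// from some offset and wrap around at 16, modulo the unary/binary mask.
bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
  uint8_t start = shuffle[0];
  for (int i = 1; i < kSimd128Size - start; ++i) {
    if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
  }
  uint8_t wrap = kSimd128Size;
  for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
    if ((shuffle[i] & mask) != (wrap & mask)) return false;
  }
  *offset = start;
  return true;
}

int main() {
  // Words {1, 0, 3, 2} expressed as bytes: recognised as a 32x4 shuffle.
  uint8_t word_swap[16] = {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
  uint8_t s32[4];
  assert(TryMatch32x4Shuffle(word_swap, s32) && s32[0] == 1 && s32[3] == 2);

  // Bytes 3..18: a binary concat of the two inputs at offset 3 (vext #3).
  uint8_t concat[16] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
  uint8_t offset = 0;
  assert(TryMatchConcat(concat, 0x1F, &offset) && offset == 3);
  return 0;
}

CanonicalizeShuffle supplies the mask used here: 0x1F for a genuinely binary shuffle, kSimd128Size - 1 (0x0F) when both inputs turn out to be the same register, so duplicate-input shuffles still match the unary patterns.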
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 88311c35e8..b36aab4aa0 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -35,6 +34,10 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputDoubleRegister(index);
}
+ DoubleRegister InputSimd128Register(size_t index) {
+ return InputDoubleRegister(index).Q();
+ }
+
CPURegister InputFloat32OrZeroRegister(size_t index) {
if (instr_->InputAt(index)->IsImmediate()) {
DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
@@ -59,6 +62,8 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+ DoubleRegister OutputSimd128Register() { return OutputDoubleRegister().Q(); }
+
Register InputRegister32(size_t index) {
return ToRegister(instr_->InputAt(index)).W();
}
@@ -83,10 +88,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return InputRegister64(index);
}
- Operand InputImmediate(size_t index) {
- return ToImmediate(instr_->InputAt(index));
- }
-
Operand InputOperand(size_t index) {
return ToOperand(instr_->InputAt(index));
}
@@ -132,7 +133,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
Operand InputOperand2_64(size_t index) {
@@ -162,7 +162,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
MemOperand MemoryOperand(size_t* first_index) {
@@ -190,7 +189,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t first_index = 0) {
@@ -228,11 +226,9 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(constant.ToInt64());
}
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand(Operand::EmbeddedNumber(constant.ToFloat32()));
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
case Constant::kHeapObject:
@@ -242,26 +238,25 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(-1);
}
- MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+ MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
DCHECK_NOT_NULL(op);
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
}
- MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
+ MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
if (offset.from_frame_pointer()) {
int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
// Convert FP-offsets to SP-offsets if it results in better code.
if (Assembler::IsImmLSUnscaled(from_sp) ||
- Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
+ Assembler::IsImmLSScaled(from_sp, 3)) {
offset = FrameOffset::FromStackPointer(from_sp);
}
}
- return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+ return MemOperand(offset.from_stack_pointer() ? tasm->StackPointer() : fp,
offset.offset());
}
};
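Illustrative note (not part of the patch): SlotToMemOperand now takes a TurboAssembler and passes the scaled-access size as a plain shift amount, but the rewrite it performs is unchanged: a slot addressed relative to fp can also be addressed relative to sp once the sp-to-fp distance is added, and whichever form encodes more cheaply wins. The offset algebra, with placeholder numbers:

// Sketch only: the FP-offset to SP-offset rewrite, with made-up frame values.
#include <cassert>

int main() {
  int sp_to_fp = 48;    // distance from the stack pointer up to fp
  int fp_offset = -16;  // slot at [fp - 16]
  int sp_offset = fp_offset + sp_to_fp;  // the same slot at [sp + 32]
  assert(sp_offset == 32);
  // The generator keeps the SP form only if the unscaled or 8-byte-scaled
  // addressing mode can encode it.
  return 0;
}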
@@ -323,7 +318,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch1_(scratch1),
mode_(mode),
must_save_lr_(!gen->frame_access_state()->has_frame()),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -343,10 +339,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Add(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
@@ -362,6 +358,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
RecordWriteMode const mode_;
bool must_save_lr_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
@@ -416,21 +413,20 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return mi;
}
UNREACHABLE();
- return nv;
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
- do { \
- if (length.IsImmediate() && \
- base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
- __ Tst(offset, ~(length.ImmediateValue() - 1)); \
- __ B(ne, out_of_bounds); \
- } else { \
- __ Cmp(offset, length); \
- __ B(hs, out_of_bounds); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
+ do { \
+ if (length.IsImmediate() && \
+ base::bits::IsPowerOfTwo(length.ImmediateValue())) { \
+ __ Tst(offset, ~(length.ImmediateValue() - 1)); \
+ __ B(ne, out_of_bounds); \
+ } else { \
+ __ Cmp(offset, length); \
+ __ B(hs, out_of_bounds); \
+ } \
} while (0)
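Illustrative note (not part of the patch): ASSEMBLE_BOUNDS_CHECK only swaps IsPowerOfTwo64 for the generic IsPowerOfTwo, but the fast path it guards relies on a neat identity: for a power-of-two length, offset & ~(length - 1) is non-zero exactly when offset >= length, so a Tst/B(ne) pair replaces Cmp/B(hs). A quick check:

// Sketch only: the power-of-two bounds-check identity used by the macro.
#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const uint64_t length = 4096;  // must be a power of two for this to hold
  for (uint64_t offset : {0ull, 1ull, 4095ull, 4096ull, 5000ull, 1ull << 40}) {
    bool tst_path = (offset & ~(length - 1)) != 0;  // Tst offset, ~(len - 1)
    bool cmp_path = offset >= length;               // Cmp offset, len ; B hs
    assert(tst_path == cmp_path);
  }
  return 0;
}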
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
@@ -569,18 +565,18 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ cbnz(i.TempRegister32(1), &binop); \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
} while (0)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -631,7 +627,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -639,10 +635,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Claim(stack_slot_delta);
+ tasm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Drop(-stack_slot_delta);
+ tasm->Drop(-stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -651,13 +647,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -669,10 +665,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
Register target = i.InputRegister(0);
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -696,14 +696,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
+ // We must not share code targets for calls to builtins for wasm code, as
+ // they might need to be patched individually.
+ internal::Assembler::BlockCodeTargetSharingScope scope;
+ if (info()->IsWasm()) scope.Open(tasm());
+
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
Register target = i.InputRegister(0);
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -727,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
@@ -755,7 +759,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
@@ -828,7 +832,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
- __ mov(i.OutputRegister(), masm()->StackPointer());
+ __ mov(i.OutputRegister(), tasm()->StackPointer());
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
@@ -841,7 +845,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -930,8 +935,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -1076,14 +1081,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
break;
case kArm64Imod: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Imod32: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireW();
__ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1091,14 +1096,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArm64Umod: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
__ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
break;
}
case kArm64Umod32: {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireW();
__ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
__ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1233,7 +1238,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Align the CSP and store the previous JSSP on the stack. We do not
// need to modify the SP delta here, as we will continue to access the
// frame via JSSP.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
// TODO(arm64): Storing JSSP on the stack is redundant when calling a C
@@ -1241,7 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// calling a code object that uses the CSP as the stack pointer). See
// the code generation for kArchCallCodeObject vs. kArchCallCFunction
// (the latter does not restore CSP/JSSP).
- // MacroAssembler::CallCFunction() (safely) drops this extra slot
+ // TurboAssembler::CallCFunction() (safely) drops this extra slot
// anyway.
int sp_alignment = __ ActivationFrameAlignment();
__ Sub(tmp, jssp, kPointerSize);
@@ -1400,13 +1405,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64Mod: {
// TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
DCHECK(d0.is(i.InputDoubleRegister(0)));
DCHECK(d1.is(i.InputDoubleRegister(1)));
DCHECK(d0.is(i.OutputDoubleRegister()));
// TODO(dcarney): make sure this saves all relevant registers.
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2);
break;
}
case kArm64Float32Max: {
@@ -1544,7 +1549,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64Float64InsertLowWord32: {
// TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
__ Fmov(tmp, i.InputFloat64Register(0));
__ Bfi(tmp, i.InputRegister(1), 0, 32);
@@ -1553,7 +1558,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArm64Float64InsertHighWord32: {
// TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register tmp = scope.AcquireX();
__ Fmov(tmp.W(), i.InputFloat32Register(0));
__ Bfi(tmp, i.InputRegister(1), 32, 32);
@@ -1614,6 +1619,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrD:
__ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
break;
+ case kArm64LdrQ:
+ __ Ldr(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ case kArm64StrQ:
+ __ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
+ break;
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
break;
@@ -1745,10 +1756,438 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
+
+#define SIMD_UNOP_CASE(Op, Instr, FORMAT) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##FORMAT(), \
+ i.InputSimd128Register(0).V##FORMAT()); \
+ break;
+#define SIMD_WIDENING_UNOP_CASE(Op, Instr, WIDE, NARROW) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##WIDE(), \
+ i.InputSimd128Register(0).V##NARROW()); \
+ break;
+#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
+ case Op: \
+ __ Instr(i.OutputSimd128Register().V##FORMAT(), \
+ i.InputSimd128Register(0).V##FORMAT(), \
+ i.InputSimd128Register(1).V##FORMAT()); \
+ break;
+
+ case kArm64F32x4Splat: {
+ __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
+ break;
+ }
+ case kArm64F32x4ExtractLane: {
+ __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64F32x4ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V4S(),
+ src1 = i.InputSimd128Register(0).V4S();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
+ SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4AddHoriz, Faddp, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
+ SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
+ case kArm64F32x4Ne: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
+ i.InputSimd128Register(1).V4S());
+ __ Mvn(dst, dst);
+ break;
+ }
+ case kArm64F32x4Lt: {
+ __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ }
+ case kArm64F32x4Le: {
+ __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
+ i.InputSimd128Register(0).V4S());
+ break;
+ }
+ case kArm64I32x4Splat: {
+ __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I32x4ExtractLane: {
+ __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I32x4ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V4S(),
+ src1 = i.InputSimd128Register(0).V4S();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8Low, Sxtl, 4S, 4H);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4SConvertI16x8High, Sxtl2, 4S, 8H);
+ SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
+ case kArm64I32x4Shl: {
+ __ Shl(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I32x4ShrS: {
+ __ Sshr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4AddHoriz, Addp, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
+ case kArm64I32x4Ne: {
+ VRegister dst = i.OutputSimd128Register().V4S();
+ __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
+ i.InputSimd128Register(1).V4S());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
+ SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8Low, Uxtl, 4S, 4H);
+ SIMD_WIDENING_UNOP_CASE(kArm64I32x4UConvertI16x8High, Uxtl2, 4S, 8H);
+ case kArm64I32x4ShrU: {
+ __ Ushr(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
+ i.InputInt5(1));
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
+ SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
+ case kArm64I16x8Splat: {
+ __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I16x8ExtractLane: {
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I16x8ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V8H(),
+ src1 = i.InputSimd128Register(0).V8H();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16Low, Sxtl, 8H, 8B);
+ SIMD_WIDENING_UNOP_CASE(kArm64I16x8SConvertI8x16High, Sxtl2, 8H, 16B);
+ SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
+ case kArm64I16x8Shl: {
+ __ Shl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8ShrS: {
+ __ Sshr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8SConvertI32x4: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V4S());
+ src1 = temp;
+ }
+ __ Sqxtn(dst.V4H(), src0.V4S());
+ __ Sqxtn2(dst.V8H(), src1.V4S());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddSaturateS, Sqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8AddHoriz, Addp, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSaturateS, Sqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
+ case kArm64I16x8Ne: {
+ VRegister dst = i.OutputSimd128Register().V8H();
+ __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
+ i.InputSimd128Register(1).V8H());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
+ case kArm64I16x8UConvertI8x16Low: {
+ __ Uxtl(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8B());
+ break;
+ }
+ case kArm64I16x8UConvertI8x16High: {
+ __ Uxtl2(i.OutputSimd128Register().V8H(),
+ i.InputSimd128Register(0).V16B());
+ break;
+ }
+ case kArm64I16x8ShrU: {
+ __ Ushr(i.OutputSimd128Register().V8H(), i.InputSimd128Register(0).V8H(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I16x8UConvertI32x4: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V4S());
+ src1 = temp;
+ }
+ __ Uqxtn(dst.V4H(), src0.V4S());
+ __ Uqxtn2(dst.V8H(), src1.V4S());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I16x8AddSaturateU, Uqadd, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8SubSaturateU, Uqsub, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
+ SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
+ case kArm64I8x16Splat: {
+ __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64I8x16ExtractLane: {
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
+ i.InputInt8(1));
+ break;
+ }
+ case kArm64I8x16ReplaceLane: {
+ VRegister dst = i.OutputSimd128Register().V16B(),
+ src1 = i.InputSimd128Register(0).V16B();
+ if (!dst.is(src1)) {
+ __ Mov(dst, src1);
+ }
+ __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
+ break;
+ }
+ SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
+ case kArm64I8x16Shl: {
+ __ Shl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16ShrS: {
+ __ Sshr(i.OutputSimd128Register().V16B(),
+ i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16SConvertI16x8: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat8H);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V8H());
+ src1 = temp;
+ }
+ __ Sqxtn(dst.V8B(), src0.V8H());
+ __ Sqxtn2(dst.V16B(), src1.V8H());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16AddSaturateS, Sqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSaturateS, Sqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Mul, Mul, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
+ case kArm64I8x16Ne: {
+ VRegister dst = i.OutputSimd128Register().V16B();
+ __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B());
+ __ Mvn(dst, dst);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
+ case kArm64I8x16ShrU: {
+ __ Ushr(i.OutputSimd128Register().V16B(),
+ i.InputSimd128Register(0).V16B(), i.InputInt5(1));
+ break;
+ }
+ case kArm64I8x16UConvertI16x8: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat8H);
+ if (dst.is(src1)) {
+ __ Mov(temp, src1.V8H());
+ src1 = temp;
+ }
+ __ Uqxtn(dst.V8B(), src0.V8H());
+ __ Uqxtn2(dst.V16B(), src1.V8H());
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64I8x16AddSaturateU, Uqadd, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16SubSaturateU, Uqsub, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
+ SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
+ case kArm64S128Zero: {
+ __ Movi(i.OutputSimd128Register().V16B(), 0);
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64S128And, And, 16B);
+ SIMD_BINOP_CASE(kArm64S128Or, Orr, 16B);
+ SIMD_BINOP_CASE(kArm64S128Xor, Eor, 16B);
+ SIMD_UNOP_CASE(kArm64S128Not, Mvn, 16B);
+ case kArm64S128Select: {
+ VRegister dst = i.OutputSimd128Register().V16B();
+ DCHECK(dst.is(i.InputSimd128Register(0).V16B()));
+ __ Bsl(dst, i.InputSimd128Register(1).V16B(),
+ i.InputSimd128Register(2).V16B());
+ break;
+ }
+ case kArm64S32x4Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register().V4S(),
+ src0 = i.InputSimd128Register(0).V4S(),
+ src1 = i.InputSimd128Register(1).V4S();
+ // Check for in-place shuffles.
+ // If dst == src0 == src1, then the shuffle is unary and we only use src0.
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat4S);
+ if (dst.is(src0)) {
+ __ Mov(temp, src0);
+ src0 = temp;
+ } else if (dst.is(src1)) {
+ __ Mov(temp, src1);
+ src1 = temp;
+ }
+ // Perform shuffle as a vmov per lane.
+ int32_t shuffle = i.InputInt32(2);
+ for (int i = 0; i < 4; i++) {
+ VRegister src = src0;
+ int lane = shuffle & 0x7;
+ if (lane >= 4) {
+ src = src1;
+ lane &= 0x3;
+ }
+ __ Mov(dst, i, src, lane);
+ shuffle >>= 8;
+ }
+ break;
+ }
+ SIMD_BINOP_CASE(kArm64S32x4ZipLeft, Zip1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4ZipRight, Zip2, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4UnzipLeft, Uzp1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4UnzipRight, Uzp2, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4TransposeLeft, Trn1, 4S);
+ SIMD_BINOP_CASE(kArm64S32x4TransposeRight, Trn2, 4S);
+ SIMD_BINOP_CASE(kArm64S16x8ZipLeft, Zip1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8ZipRight, Zip2, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8UnzipLeft, Uzp1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8UnzipRight, Uzp2, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8TransposeLeft, Trn1, 8H);
+ SIMD_BINOP_CASE(kArm64S16x8TransposeRight, Trn2, 8H);
+ SIMD_BINOP_CASE(kArm64S8x16ZipLeft, Zip1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16ZipRight, Zip2, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16UnzipLeft, Uzp1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16UnzipRight, Uzp2, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16TransposeLeft, Trn1, 16B);
+ SIMD_BINOP_CASE(kArm64S8x16TransposeRight, Trn2, 16B);
+ case kArm64S8x16Concat: {
+ __ Ext(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
+ i.InputSimd128Register(1).V16B(), i.InputInt4(2));
+ break;
+ }
+ case kArm64S8x16Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register().V16B(),
+ src0 = i.InputSimd128Register(0).V16B(),
+ src1 = i.InputSimd128Register(1).V16B();
+ // Unary shuffle table is in src0, binary shuffle table is in src0, src1,
+ // which must be consecutive.
+ int64_t mask = 0;
+ if (src0.is(src1)) {
+ mask = 0x0F0F0F0F;
+ } else {
+ mask = 0x1F1F1F1F;
+ DCHECK(AreConsecutive(src0, src1));
+ }
+ int64_t imm1 =
+ (i.InputInt32(2) & mask) | ((i.InputInt32(3) & mask) << 32);
+ int64_t imm2 =
+ (i.InputInt32(4) & mask) | ((i.InputInt32(5) & mask) << 32);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireV(kFormat16B);
+ __ Movi(temp, imm2, imm1);
+
+ if (src0.is(src1)) {
+ __ Tbl(dst, src0, temp.V16B());
+ } else {
+ __ Tbl(dst, src0, src1, temp.V16B());
+ }
+ break;
+ }
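Illustrative note (not part of the patch): the kArm64S8x16Shuffle case receives the 16 byte indices as four packed 32-bit immediates (see Pack4Lanes on the selector side), masks them to the unary or binary index range, packs them into the two 64-bit Movi immediates, and then permutes with a one- or two-register TBL. The packing step, restated with plain integers for an identity shuffle:

// Sketch only: how the four packed 32-bit shuffle immediates become the two
// 64-bit Movi immediates for the TBL table.
#include <cassert>
#include <cstdint>

int main() {
  // Byte indices 0..15 (identity), pre-packed 4-per-word little-endian, as
  // Pack4Lanes on the selector side would produce them.
  int32_t in[4] = {0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C};
  int64_t mask = 0x0F0F0F0F;  // unary shuffle: both table operands are src0
  int64_t imm1 = (in[0] & mask) | ((in[1] & mask) << 32);
  int64_t imm2 = (in[2] & mask) | ((in[3] & mask) << 32);
  // Movi(temp, imm2, imm1) writes imm1 to lanes 0..7 and imm2 to lanes 8..15.
  assert(imm1 == 0x0706050403020100);
  assert(imm2 == 0x0F0E0D0C0B0A0908);
  return 0;
}

For a unary shuffle both TBL operands are src0 and the 0x0F0F0F0F mask folds indices 16-31 back onto 0-15; the binary case keeps 0x1F1F1F1F and requires src0 and src1 to be consecutive registers for the two-register TBL.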
+ SIMD_UNOP_CASE(kArm64S32x2Reverse, Rev64, 4S);
+ SIMD_UNOP_CASE(kArm64S16x4Reverse, Rev64, 8H);
+ SIMD_UNOP_CASE(kArm64S16x2Reverse, Rev32, 8H);
+ SIMD_UNOP_CASE(kArm64S8x8Reverse, Rev64, 16B);
+ SIMD_UNOP_CASE(kArm64S8x4Reverse, Rev32, 16B);
+ SIMD_UNOP_CASE(kArm64S8x2Reverse, Rev16, 16B);
+
+#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
+ case Op: { \
+ UseScratchRegisterScope scope(tasm()); \
+ VRegister temp = scope.AcquireV(format); \
+ __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
+ __ Umov(i.OutputRegister32(), temp, 0); \
+ break; \
+ }
+ SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64S1x4AllTrue, Uminv, kFormatS, 4S);
+ SIMD_REDUCE_OP_CASE(kArm64S1x8AnyTrue, Umaxv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64S1x8AllTrue, Uminv, kFormatH, 8H);
+ SIMD_REDUCE_OP_CASE(kArm64S1x16AnyTrue, Umaxv, kFormatB, 16B);
+ SIMD_REDUCE_OP_CASE(kArm64S1x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
} // NOLINT(readability/fn_size)
+#undef SIMD_UNOP_CASE
+#undef SIMD_WIDENING_UNOP_CASE
+#undef SIMD_BINOP_CASE
+#undef SIMD_REDUCE_OP_CASE
// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -1843,9 +2282,9 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
if (trap_id == Builtins::builtin_count) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
@@ -1853,7 +2292,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -1903,7 +2342,7 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
Arm64OperandConverter i(this, instr);
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register input = i.InputRegister32(0);
Register temp = scope.AcquireX();
size_t const case_count = instr->InputCount() - 2;
@@ -1930,9 +2369,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -1950,11 +2389,11 @@ void CodeGenerator::FinishFrame(Frame* frame) {
}
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
frame->AllocateSavedCalleeRegisterSlots(saved_count *
(kDoubleSize / kPointerSize));
}
@@ -1984,7 +2423,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ Prologue(this->info()->GeneratePreagedPrologue());
} else {
__ Push(lr, fp);
- __ Mov(fp, masm_.StackPointer());
+ __ Mov(fp, __ StackPointer());
}
if (!info()->GeneratePreagedPrologue()) {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
@@ -2004,7 +2443,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
if (info()->IsWasm() && shrink_slots > 128) {
@@ -2017,11 +2456,10 @@ void CodeGenerator::AssembleConstructFrame() {
// exception unconditionally. Thereby we can avoid the integer overflow
// check in the condition code.
if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register scratch = scope.AcquireX();
- __ Mov(
- scratch,
- Operand(ExternalReference::address_of_real_stack_limit(isolate())));
+ __ Mov(scratch, Operand(ExternalReference::address_of_real_stack_limit(
+ __ isolate())));
__ Ldr(scratch, MemOperand(scratch));
__ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
__ Cmp(__ StackPointer(), scratch);
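Illustrative note (not part of the patch): the wasm stack check above now goes through tasm() and __ isolate() but keeps its shape: if the frame itself is at least as large as the configured stack, the code throws unconditionally; otherwise the real stack limit is bumped by the frame size before a single compare, which the size guard keeps free of overflow. The shape of that decision, with placeholder values (FLAG_stack_size is in KB):

// Sketch only: the wasm stack check built above, as a standalone predicate.
#include <cstdint>

// Placeholder inputs; in V8 these come from the frame layout and --stack-size.
bool FrameFitsOnStack(uint64_t sp, uint64_t real_stack_limit,
                      uint64_t shrink_slots, uint64_t stack_size_kb) {
  const uint64_t kPointerSize = 8;
  if (shrink_slots * kPointerSize >= stack_size_kb * 1024) {
    return false;  // frame can never fit: throw without comparing
  }
  // limit + frame size cannot overflow here, so one compare suffices.
  return sp >= real_stack_limit + shrink_slots * kPointerSize;
}

int main() {
  return FrameFitsOnStack(1u << 20, 4096, 64, 984) ? 0 : 1;
}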
@@ -2040,7 +2478,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Initialize the jssp because it is required for the runtime call.
__ Mov(jssp, csp);
__ Move(cp, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
@@ -2065,7 +2503,7 @@ void CodeGenerator::AssembleConstructFrame() {
bool is_stub_frame =
!descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
if (is_stub_frame) {
- UseScratchRegisterScope temps(masm());
+ UseScratchRegisterScope temps(tasm());
Register temp = temps.AcquireX();
__ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
__ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
@@ -2073,11 +2511,11 @@ void CodeGenerator::AssembleConstructFrame() {
}
// Save FP registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
int saved_count = saves_fp.Count();
if (saved_count != 0) {
- DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
+ DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedV().list());
__ PushCPURegList(saves_fp);
}
// Save registers.
@@ -2103,7 +2541,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
// Restore fp registers.
- CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
+ CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
if (saves_fp.Count() != 0) {
__ PopCPURegList(saves_fp);
@@ -2155,7 +2593,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { masm()->CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2168,23 +2606,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ Mov(g.ToRegister(destination), src);
} else {
- __ Str(src, g.ToMemOperand(destination, masm()));
+ __ Str(src, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsStackSlot()) {
- MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand src = g.ToMemOperand(source, tasm());
DCHECK(destination->IsRegister() || destination->IsStackSlot());
if (destination->IsRegister()) {
__ Ldr(g.ToRegister(destination), src);
} else {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
__ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(ConstantOperand::cast(source));
if (destination->IsRegister() || destination->IsStackSlot()) {
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
if (src.type() == Constant::kHeapObject) {
@@ -2193,65 +2631,81 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
- __ LoadObject(dst, src_object);
+ __ Mov(dst, src_object);
}
} else {
__ Mov(dst, g.ToImmediate(source));
}
if (destination->IsStackSlot()) {
- __ Str(dst, g.ToMemOperand(destination, masm()));
+ __ Str(dst, g.ToMemOperand(destination, tasm()));
}
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination).S();
+ VRegister dst = g.ToDoubleRegister(destination).S();
__ Fmov(dst, src.ToFloat32());
} else {
DCHECK(destination->IsFPStackSlot());
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ Str(wzr, g.ToMemOperand(destination, masm()));
+ __ Str(wzr, g.ToMemOperand(destination, tasm()));
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireS();
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireS();
__ Fmov(temp, src.ToFloat32());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
- __ Fmov(dst, src.ToFloat64());
+ VRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src.ToFloat64().value());
} else {
DCHECK(destination->IsFPStackSlot());
- if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
- __ Str(xzr, g.ToMemOperand(destination, masm()));
+ if (src.ToFloat64().AsUint64() == 0) {
+ __ Str(xzr, g.ToMemOperand(destination, tasm()));
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Fmov(temp, src.ToFloat64());
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64().value());
+ __ Str(temp, g.ToMemOperand(destination, tasm()));
}
}
}
} else if (source->IsFPRegister()) {
- FPRegister src = g.ToDoubleRegister(source);
+ VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
+ VRegister dst = g.ToDoubleRegister(destination);
__ Fmov(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ Str(src, g.ToMemOperand(destination, masm()));
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (destination->IsSimd128StackSlot()) {
+ __ Str(src.Q(), dst);
+ } else {
+ __ Str(src, dst);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source, masm());
+ MemOperand src = g.ToMemOperand(source, tasm());
if (destination->IsFPRegister()) {
- __ Ldr(g.ToDoubleRegister(destination), src);
+ VRegister dst = g.ToDoubleRegister(destination);
+ if (destination->IsSimd128Register()) {
+ __ Ldr(dst.Q(), src);
+ } else {
+ __ Ldr(dst, src);
+ }
} else {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Ldr(temp, src);
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (destination->IsSimd128StackSlot()) {
+ __ Ldr(temp.Q(), src);
+ __ Str(temp.Q(), dst);
+ } else {
+ __ Ldr(temp, src);
+ __ Str(temp, dst);
+ }
}
} else {
UNREACHABLE();
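The float64 constant path above stores xzr only when the constant's 64-bit pattern is all zeros. A standalone sketch (not from the patch) of why that test matches +0.0 but not -0.0, which keeps the store bit-exact:

#include <cstdint>
#include <cstdio>
#include <cstring>

// memcpy-based reinterpretation, in the spirit of bit_cast.
static uint64_t BitPattern(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;
}

int main() {
  std::printf("+0.0 -> %016llx\n", (unsigned long long)BitPattern(0.0));   // 0000000000000000
  std::printf("-0.0 -> %016llx\n", (unsigned long long)BitPattern(-0.0));  // 8000000000000000
  return 0;
}

Negative zero carries the sign bit, so it still goes through the scratch-VRegister Fmov path.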
@@ -2266,7 +2720,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// combinations are possible.
if (source->IsRegister()) {
// Register-register.
- UseScratchRegisterScope scope(masm());
+ UseScratchRegisterScope scope(tasm());
Register temp = scope.AcquireX();
Register src = g.ToRegister(source);
if (destination->IsRegister()) {
@@ -2276,36 +2730,49 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ Mov(dst, temp);
} else {
DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination, masm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
__ Mov(temp, src);
__ Ldr(src, dst);
__ Str(temp, dst);
}
} else if (source->IsStackSlot() || source->IsFPStackSlot()) {
- UseScratchRegisterScope scope(masm());
- DoubleRegister temp_0 = scope.AcquireD();
- DoubleRegister temp_1 = scope.AcquireD();
- MemOperand src = g.ToMemOperand(source, masm());
- MemOperand dst = g.ToMemOperand(destination, masm());
- __ Ldr(temp_0, src);
- __ Ldr(temp_1, dst);
- __ Str(temp_0, dst);
- __ Str(temp_1, src);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp_0 = scope.AcquireD();
+ VRegister temp_1 = scope.AcquireD();
+ MemOperand src = g.ToMemOperand(source, tasm());
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsSimd128StackSlot()) {
+ __ Ldr(temp_0.Q(), src);
+ __ Ldr(temp_1.Q(), dst);
+ __ Str(temp_0.Q(), dst);
+ __ Str(temp_1.Q(), src);
+ } else {
+ __ Ldr(temp_0, src);
+ __ Ldr(temp_1, dst);
+ __ Str(temp_0, dst);
+ __ Str(temp_1, src);
+ }
} else if (source->IsFPRegister()) {
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- FPRegister src = g.ToDoubleRegister(source);
+ UseScratchRegisterScope scope(tasm());
+ VRegister temp = scope.AcquireD();
+ VRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
- FPRegister dst = g.ToDoubleRegister(destination);
+ VRegister dst = g.ToDoubleRegister(destination);
__ Fmov(temp, src);
__ Fmov(src, dst);
__ Fmov(dst, temp);
} else {
DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination, masm());
- __ Fmov(temp, src);
- __ Ldr(src, dst);
- __ Str(temp, dst);
+ MemOperand dst = g.ToMemOperand(destination, tasm());
+ if (source->IsSimd128Register()) {
+ __ Fmov(temp.Q(), src.Q());
+ __ Ldr(src.Q(), dst);
+ __ Str(temp.Q(), dst);
+ } else {
+ __ Fmov(temp, src);
+ __ Ldr(src, dst);
+ __ Str(temp, dst);
+ }
}
} else {
// No other combinations are possible.
@@ -2328,13 +2795,13 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
+ intptr_t current_pc = tasm()->pc_offset();
if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK((padding_size % kInstructionSize) == 0);
InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
+ tasm(), padding_size / kInstructionSize);
while (padding_size > 0) {
__ nop();
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 898a9e9b35..65c8729bdb 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -143,6 +143,8 @@ namespace compiler {
V(Arm64StrS) \
V(Arm64LdrD) \
V(Arm64StrD) \
+ V(Arm64LdrQ) \
+ V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
V(Arm64Strb) \
@@ -153,7 +155,149 @@ namespace compiler {
V(Arm64LdrW) \
V(Arm64StrW) \
V(Arm64Ldr) \
- V(Arm64Str)
+ V(Arm64Str) \
+ V(Arm64F32x4Splat) \
+ V(Arm64F32x4ExtractLane) \
+ V(Arm64F32x4ReplaceLane) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4Abs) \
+ V(Arm64F32x4Neg) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Add) \
+ V(Arm64F32x4AddHoriz) \
+ V(Arm64F32x4Sub) \
+ V(Arm64F32x4Mul) \
+ V(Arm64F32x4Min) \
+ V(Arm64F32x4Max) \
+ V(Arm64F32x4Eq) \
+ V(Arm64F32x4Ne) \
+ V(Arm64F32x4Lt) \
+ V(Arm64F32x4Le) \
+ V(Arm64I32x4Splat) \
+ V(Arm64I32x4ExtractLane) \
+ V(Arm64I32x4ReplaceLane) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4SConvertI16x8Low) \
+ V(Arm64I32x4SConvertI16x8High) \
+ V(Arm64I32x4Neg) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Add) \
+ V(Arm64I32x4AddHoriz) \
+ V(Arm64I32x4Sub) \
+ V(Arm64I32x4Mul) \
+ V(Arm64I32x4MinS) \
+ V(Arm64I32x4MaxS) \
+ V(Arm64I32x4Eq) \
+ V(Arm64I32x4Ne) \
+ V(Arm64I32x4GtS) \
+ V(Arm64I32x4GeS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4UConvertI16x8Low) \
+ V(Arm64I32x4UConvertI16x8High) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64I32x4MinU) \
+ V(Arm64I32x4MaxU) \
+ V(Arm64I32x4GtU) \
+ V(Arm64I32x4GeU) \
+ V(Arm64I16x8Splat) \
+ V(Arm64I16x8ExtractLane) \
+ V(Arm64I16x8ReplaceLane) \
+ V(Arm64I16x8SConvertI8x16Low) \
+ V(Arm64I16x8SConvertI8x16High) \
+ V(Arm64I16x8Neg) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64I16x8Add) \
+ V(Arm64I16x8AddSaturateS) \
+ V(Arm64I16x8AddHoriz) \
+ V(Arm64I16x8Sub) \
+ V(Arm64I16x8SubSaturateS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8MinS) \
+ V(Arm64I16x8MaxS) \
+ V(Arm64I16x8Eq) \
+ V(Arm64I16x8Ne) \
+ V(Arm64I16x8GtS) \
+ V(Arm64I16x8GeS) \
+ V(Arm64I16x8UConvertI8x16Low) \
+ V(Arm64I16x8UConvertI8x16High) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64I16x8AddSaturateU) \
+ V(Arm64I16x8SubSaturateU) \
+ V(Arm64I16x8MinU) \
+ V(Arm64I16x8MaxU) \
+ V(Arm64I16x8GtU) \
+ V(Arm64I16x8GeU) \
+ V(Arm64I8x16Splat) \
+ V(Arm64I8x16ExtractLane) \
+ V(Arm64I8x16ReplaceLane) \
+ V(Arm64I8x16Neg) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16Add) \
+ V(Arm64I8x16AddSaturateS) \
+ V(Arm64I8x16Sub) \
+ V(Arm64I8x16SubSaturateS) \
+ V(Arm64I8x16Mul) \
+ V(Arm64I8x16MinS) \
+ V(Arm64I8x16MaxS) \
+ V(Arm64I8x16Eq) \
+ V(Arm64I8x16Ne) \
+ V(Arm64I8x16GtS) \
+ V(Arm64I8x16GeS) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16AddSaturateU) \
+ V(Arm64I8x16SubSaturateU) \
+ V(Arm64I8x16MinU) \
+ V(Arm64I8x16MaxU) \
+ V(Arm64I8x16GtU) \
+ V(Arm64I8x16GeU) \
+ V(Arm64S128Zero) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64S8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64S1x4AnyTrue) \
+ V(Arm64S1x4AllTrue) \
+ V(Arm64S1x8AnyTrue) \
+ V(Arm64S1x8AllTrue) \
+ V(Arm64S1x16AnyTrue) \
+ V(Arm64S1x16AllTrue)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
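The new SIMD opcodes only need to be appended to this target-specific X-macro list; the same list is expanded elsewhere into the ArchOpcode enum and into switch cases, which is why the scheduler change below repeats every name. A minimal sketch of that pattern (the macro and type names here are illustrative, not V8's actual plumbing):

#define TARGET_ARCH_OPCODE_LIST_SKETCH(V) \
  V(Arm64LdrQ)                            \
  V(Arm64StrQ)                            \
  V(Arm64F32x4Splat)

enum ArchOpcodeSketch {
#define DECLARE_OPCODE(Name) k##Name,
  TARGET_ARCH_OPCODE_LIST_SKETCH(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// A second expansion turns the same list into case labels.
const char* ArchOpcodeName(ArchOpcodeSketch opcode) {
  switch (opcode) {
#define OPCODE_CASE(Name) \
  case k##Name:           \
    return #Name;
    TARGET_ARCH_OPCODE_LIST_SKETCH(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "unknown";
}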
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index d3504dfd22..994e157e17 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -132,6 +132,148 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
+ case kArm64F32x4Splat:
+ case kArm64F32x4ExtractLane:
+ case kArm64F32x4ReplaceLane:
+ case kArm64F32x4SConvertI32x4:
+ case kArm64F32x4UConvertI32x4:
+ case kArm64F32x4Abs:
+ case kArm64F32x4Neg:
+ case kArm64F32x4RecipApprox:
+ case kArm64F32x4RecipSqrtApprox:
+ case kArm64F32x4Add:
+ case kArm64F32x4AddHoriz:
+ case kArm64F32x4Sub:
+ case kArm64F32x4Mul:
+ case kArm64F32x4Min:
+ case kArm64F32x4Max:
+ case kArm64F32x4Eq:
+ case kArm64F32x4Ne:
+ case kArm64F32x4Lt:
+ case kArm64F32x4Le:
+ case kArm64I32x4Splat:
+ case kArm64I32x4ExtractLane:
+ case kArm64I32x4ReplaceLane:
+ case kArm64I32x4SConvertF32x4:
+ case kArm64I32x4SConvertI16x8Low:
+ case kArm64I32x4SConvertI16x8High:
+ case kArm64I32x4Neg:
+ case kArm64I32x4Shl:
+ case kArm64I32x4ShrS:
+ case kArm64I32x4Add:
+ case kArm64I32x4AddHoriz:
+ case kArm64I32x4Sub:
+ case kArm64I32x4Mul:
+ case kArm64I32x4MinS:
+ case kArm64I32x4MaxS:
+ case kArm64I32x4Eq:
+ case kArm64I32x4Ne:
+ case kArm64I32x4GtS:
+ case kArm64I32x4GeS:
+ case kArm64I32x4UConvertF32x4:
+ case kArm64I32x4UConvertI16x8Low:
+ case kArm64I32x4UConvertI16x8High:
+ case kArm64I32x4ShrU:
+ case kArm64I32x4MinU:
+ case kArm64I32x4MaxU:
+ case kArm64I32x4GtU:
+ case kArm64I32x4GeU:
+ case kArm64I16x8Splat:
+ case kArm64I16x8ExtractLane:
+ case kArm64I16x8ReplaceLane:
+ case kArm64I16x8SConvertI8x16Low:
+ case kArm64I16x8SConvertI8x16High:
+ case kArm64I16x8Neg:
+ case kArm64I16x8Shl:
+ case kArm64I16x8ShrS:
+ case kArm64I16x8SConvertI32x4:
+ case kArm64I16x8Add:
+ case kArm64I16x8AddSaturateS:
+ case kArm64I16x8AddHoriz:
+ case kArm64I16x8Sub:
+ case kArm64I16x8SubSaturateS:
+ case kArm64I16x8Mul:
+ case kArm64I16x8MinS:
+ case kArm64I16x8MaxS:
+ case kArm64I16x8Eq:
+ case kArm64I16x8Ne:
+ case kArm64I16x8GtS:
+ case kArm64I16x8GeS:
+ case kArm64I16x8UConvertI8x16Low:
+ case kArm64I16x8UConvertI8x16High:
+ case kArm64I16x8ShrU:
+ case kArm64I16x8UConvertI32x4:
+ case kArm64I16x8AddSaturateU:
+ case kArm64I16x8SubSaturateU:
+ case kArm64I16x8MinU:
+ case kArm64I16x8MaxU:
+ case kArm64I16x8GtU:
+ case kArm64I16x8GeU:
+ case kArm64I8x16Splat:
+ case kArm64I8x16ExtractLane:
+ case kArm64I8x16ReplaceLane:
+ case kArm64I8x16Neg:
+ case kArm64I8x16Shl:
+ case kArm64I8x16ShrS:
+ case kArm64I8x16SConvertI16x8:
+ case kArm64I8x16Add:
+ case kArm64I8x16AddSaturateS:
+ case kArm64I8x16Sub:
+ case kArm64I8x16SubSaturateS:
+ case kArm64I8x16Mul:
+ case kArm64I8x16MinS:
+ case kArm64I8x16MaxS:
+ case kArm64I8x16Eq:
+ case kArm64I8x16Ne:
+ case kArm64I8x16GtS:
+ case kArm64I8x16GeS:
+ case kArm64I8x16UConvertI16x8:
+ case kArm64I8x16AddSaturateU:
+ case kArm64I8x16SubSaturateU:
+ case kArm64I8x16ShrU:
+ case kArm64I8x16MinU:
+ case kArm64I8x16MaxU:
+ case kArm64I8x16GtU:
+ case kArm64I8x16GeU:
+ case kArm64S128Zero:
+ case kArm64S128And:
+ case kArm64S128Or:
+ case kArm64S128Xor:
+ case kArm64S128Not:
+ case kArm64S128Select:
+ case kArm64S32x4ZipLeft:
+ case kArm64S32x4ZipRight:
+ case kArm64S32x4UnzipLeft:
+ case kArm64S32x4UnzipRight:
+ case kArm64S32x4TransposeLeft:
+ case kArm64S32x4TransposeRight:
+ case kArm64S32x4Shuffle:
+ case kArm64S16x8ZipLeft:
+ case kArm64S16x8ZipRight:
+ case kArm64S16x8UnzipLeft:
+ case kArm64S16x8UnzipRight:
+ case kArm64S16x8TransposeLeft:
+ case kArm64S16x8TransposeRight:
+ case kArm64S8x16ZipLeft:
+ case kArm64S8x16ZipRight:
+ case kArm64S8x16UnzipLeft:
+ case kArm64S8x16UnzipRight:
+ case kArm64S8x16TransposeLeft:
+ case kArm64S8x16TransposeRight:
+ case kArm64S8x16Concat:
+ case kArm64S8x16Shuffle:
+ case kArm64S32x2Reverse:
+ case kArm64S16x4Reverse:
+ case kArm64S16x2Reverse:
+ case kArm64S8x8Reverse:
+ case kArm64S8x4Reverse:
+ case kArm64S8x2Reverse:
+ case kArm64S1x4AnyTrue:
+ case kArm64S1x4AllTrue:
+ case kArm64S1x8AnyTrue:
+ case kArm64S1x8AllTrue:
+ case kArm64S1x16AnyTrue:
+ case kArm64S1x16AllTrue:
return kNoOpcodeFlags;
case kArm64TestAndBranch32:
@@ -142,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrS:
case kArm64LdrD:
+ case kArm64LdrQ:
case kArm64Ldrb:
case kArm64Ldrsb:
case kArm64Ldrh:
@@ -158,6 +301,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
+ case kArm64StrQ:
case kArm64Strb:
case kArm64Strh:
case kArm64StrW:
@@ -172,7 +316,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
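Several hunks in this patch drop the dead return that used to follow UNREACHABLE(). That is only warning-free if the compiler knows the macro never returns; a hedged sketch of the pattern (assuming a [[noreturn]] fatal handler, which is what the deletions imply):

#include <cstdlib>

[[noreturn]] void FatalUnreachableSketch() { std::abort(); }
#define UNREACHABLE_SKETCH() FatalUnreachableSketch()

int GetFlagsSketch(int opcode) {
  switch (opcode) {
    case 0:
      return 1;
    default:
      UNREACHABLE_SKETCH();  // no trailing "return 0;" needed
  }
}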
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 0e9fd0ca2b..f0e306a43c 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -103,13 +103,13 @@ class Arm64OperandGenerator final : public OperandGenerator {
case kArithmeticImm:
return Assembler::IsImmAddSub(value);
case kLoadStoreImm8:
- return IsLoadStoreImmediate(value, LSByte);
+ return IsLoadStoreImmediate(value, 0);
case kLoadStoreImm16:
- return IsLoadStoreImmediate(value, LSHalfword);
+ return IsLoadStoreImmediate(value, 1);
case kLoadStoreImm32:
- return IsLoadStoreImmediate(value, LSWord);
+ return IsLoadStoreImmediate(value, 2);
case kLoadStoreImm64:
- return IsLoadStoreImmediate(value, LSDoubleWord);
+ return IsLoadStoreImmediate(value, 3);
case kNoImmediate:
return false;
case kShift32Imm: // Fall through.
@@ -130,7 +130,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
}
private:
- bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+ bool IsLoadStoreImmediate(int64_t value, unsigned size) {
return Assembler::IsImmLSScaled(value, size) ||
Assembler::IsImmLSUnscaled(value);
}
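With LSDataSize gone, the size argument is simply the log2 of the access width in bytes: 0, 1, 2, 3 where the old enum had LSByte, LSHalfword, LSWord, LSDoubleWord. A rough standalone sketch of the two immediate forms being tested, assuming the usual AArch64 encodings rather than V8's exact Assembler helpers:

#include <cstdint>

// Scaled form (LDR/STR): unsigned 12-bit offset, a multiple of the access size.
bool IsImmLSScaledSketch(int64_t offset, unsigned size_log2) {
  if (offset & ((int64_t{1} << size_log2) - 1)) return false;  // misaligned
  int64_t scaled = offset >> size_log2;
  return scaled >= 0 && scaled < (1 << 12);
}

// Unscaled form (LDUR/STUR): signed 9-bit byte offset.
bool IsImmLSUnscaledSketch(int64_t offset) {
  return offset >= -256 && offset <= 255;
}

A doubleword access (size 3), for example, accepts scaled offsets 0..32760 in steps of 8, plus anything in -256..255 unscaled.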
@@ -153,6 +153,12 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Arm64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
ImmediateMode operand_mode) {
@@ -162,6 +168,14 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
+void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Arm64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
struct ExtendingLoadMatcher {
ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
: matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
@@ -390,7 +404,6 @@ uint8_t GetBinopProperties(InstructionCode opcode) {
break;
default:
UNREACHABLE();
- return 0;
}
DCHECK_IMPLIES(MustCommuteCondField::decode(result),
CanCommuteField::decode(result));
@@ -518,8 +531,8 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
DCHECK(m->IsInt32Mul() || m->IsInt64Mul());
if (m->right().HasValue() && m->right().Value() >= 3) {
uint64_t value_minus_one = m->right().Value() - 1;
- if (base::bits::IsPowerOfTwo64(value_minus_one)) {
- return WhichPowerOf2_64(value_minus_one);
+ if (base::bits::IsPowerOfTwo(value_minus_one)) {
+ return WhichPowerOf2(value_minus_one);
}
}
return 0;
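LeftShiftForReducedMultiply now calls the width-agnostic IsPowerOfTwo/WhichPowerOf2 helpers, but the strength reduction itself is unchanged: a multiply by 2^k + 1 becomes an add with a shifted operand. An illustrative standalone sketch (not V8's code):

#include <cstdint>
#include <cstdio>

int LeftShiftForReducedMultiplySketch(uint64_t multiplier) {
  if (multiplier < 3) return 0;
  uint64_t value_minus_one = multiplier - 1;
  if (value_minus_one & (value_minus_one - 1)) return 0;  // not a power of two
  int shift = 0;
  while ((uint64_t{1} << shift) != value_minus_one) shift++;
  return shift;
}

int main() {
  uint64_t x = 7;
  int shift = LeftShiftForReducedMultiplySketch(9);  // 3
  std::printf("x * 9 = %llu\n", (unsigned long long)(x + (x << shift)));  // 63
  return 0;
}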
@@ -602,10 +615,10 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kArm64LdrQ;
+ immediate_mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -701,10 +714,10 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kArm64StrQ;
+ immediate_mode = kNoImmediate;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -773,9 +786,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -828,9 +838,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1898,7 +1905,6 @@ FlagsCondition MapForFlagSettingBinop(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -1961,7 +1967,6 @@ FlagsCondition MapForTbz(FlagsCondition cond) {
return kEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -1979,7 +1984,6 @@ FlagsCondition MapForCbz(FlagsCondition cond) {
return kNotEqual;
default:
UNREACHABLE();
- return cond;
}
}
@@ -2396,6 +2400,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2403,7 +2408,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
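The switch lowering above now also caps the jump-table path at a value range of 2 << 16. A sketch of the whole decision, with lookup_time_cost passed in because its definition sits outside the quoted context:

#include <cstddef>
#include <cstdint>
#include <limits>

bool UseTableSwitchSketch(size_t case_count, size_t value_range,
                          int64_t min_value, size_t lookup_time_cost) {
  const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}

A dense switch still picks the table quickly; for sparse ones the new cap keeps the emitted table from growing past roughly 128K entries.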
@@ -2853,6 +2859,376 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4, 4) \
+ V(16x8, 8) \
+ V(8x16, 16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
+ V(F32x4Abs, kArm64F32x4Abs) \
+ V(F32x4Neg, kArm64F32x4Neg) \
+ V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
+ V(I32x4SConvertI16x8Low, kArm64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kArm64I32x4SConvertI16x8High) \
+ V(I32x4Neg, kArm64I32x4Neg) \
+ V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
+ V(I32x4UConvertI16x8Low, kArm64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kArm64I32x4UConvertI16x8High) \
+ V(I16x8SConvertI8x16Low, kArm64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kArm64I16x8SConvertI8x16High) \
+ V(I16x8Neg, kArm64I16x8Neg) \
+ V(I16x8UConvertI8x16Low, kArm64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kArm64I16x8UConvertI8x16High) \
+ V(I8x16Neg, kArm64I8x16Neg) \
+ V(S128Not, kArm64S128Not) \
+ V(S1x4AnyTrue, kArm64S1x4AnyTrue) \
+ V(S1x4AllTrue, kArm64S1x4AllTrue) \
+ V(S1x8AnyTrue, kArm64S1x8AnyTrue) \
+ V(S1x8AllTrue, kArm64S1x8AllTrue) \
+ V(S1x16AnyTrue, kArm64S1x16AnyTrue) \
+ V(S1x16AllTrue, kArm64S1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kArm64F32x4Add) \
+ V(F32x4AddHoriz, kArm64F32x4AddHoriz) \
+ V(F32x4Sub, kArm64F32x4Sub) \
+ V(F32x4Mul, kArm64F32x4Mul) \
+ V(F32x4Min, kArm64F32x4Min) \
+ V(F32x4Max, kArm64F32x4Max) \
+ V(F32x4Eq, kArm64F32x4Eq) \
+ V(F32x4Ne, kArm64F32x4Ne) \
+ V(F32x4Lt, kArm64F32x4Lt) \
+ V(F32x4Le, kArm64F32x4Le) \
+ V(I32x4Add, kArm64I32x4Add) \
+ V(I32x4AddHoriz, kArm64I32x4AddHoriz) \
+ V(I32x4Sub, kArm64I32x4Sub) \
+ V(I32x4Mul, kArm64I32x4Mul) \
+ V(I32x4MinS, kArm64I32x4MinS) \
+ V(I32x4MaxS, kArm64I32x4MaxS) \
+ V(I32x4Eq, kArm64I32x4Eq) \
+ V(I32x4Ne, kArm64I32x4Ne) \
+ V(I32x4GtS, kArm64I32x4GtS) \
+ V(I32x4GeS, kArm64I32x4GeS) \
+ V(I32x4MinU, kArm64I32x4MinU) \
+ V(I32x4MaxU, kArm64I32x4MaxU) \
+ V(I32x4GtU, kArm64I32x4GtU) \
+ V(I32x4GeU, kArm64I32x4GeU) \
+ V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
+ V(I16x8Add, kArm64I16x8Add) \
+ V(I16x8AddSaturateS, kArm64I16x8AddSaturateS) \
+ V(I16x8AddHoriz, kArm64I16x8AddHoriz) \
+ V(I16x8Sub, kArm64I16x8Sub) \
+ V(I16x8SubSaturateS, kArm64I16x8SubSaturateS) \
+ V(I16x8Mul, kArm64I16x8Mul) \
+ V(I16x8MinS, kArm64I16x8MinS) \
+ V(I16x8MaxS, kArm64I16x8MaxS) \
+ V(I16x8Eq, kArm64I16x8Eq) \
+ V(I16x8Ne, kArm64I16x8Ne) \
+ V(I16x8GtS, kArm64I16x8GtS) \
+ V(I16x8GeS, kArm64I16x8GeS) \
+ V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
+ V(I16x8AddSaturateU, kArm64I16x8AddSaturateU) \
+ V(I16x8SubSaturateU, kArm64I16x8SubSaturateU) \
+ V(I16x8MinU, kArm64I16x8MinU) \
+ V(I16x8MaxU, kArm64I16x8MaxU) \
+ V(I16x8GtU, kArm64I16x8GtU) \
+ V(I16x8GeU, kArm64I16x8GeU) \
+ V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
+ V(I8x16Add, kArm64I8x16Add) \
+ V(I8x16AddSaturateS, kArm64I8x16AddSaturateS) \
+ V(I8x16Sub, kArm64I8x16Sub) \
+ V(I8x16SubSaturateS, kArm64I8x16SubSaturateS) \
+ V(I8x16Mul, kArm64I8x16Mul) \
+ V(I8x16MinS, kArm64I8x16MinS) \
+ V(I8x16MaxS, kArm64I8x16MaxS) \
+ V(I8x16Eq, kArm64I8x16Eq) \
+ V(I8x16Ne, kArm64I8x16Ne) \
+ V(I8x16GtS, kArm64I8x16GtS) \
+ V(I8x16GeS, kArm64I8x16GeS) \
+ V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
+ V(I8x16AddSaturateU, kArm64I8x16AddSaturateU) \
+ V(I8x16SubSaturateU, kArm64I8x16SubSaturateU) \
+ V(I8x16MinU, kArm64I8x16MinU) \
+ V(I8x16MaxU, kArm64I8x16MaxU) \
+ V(I8x16GtU, kArm64I8x16GtU) \
+ V(I8x16GeU, kArm64I8x16GeU) \
+ V(S128And, kArm64S128And) \
+ V(S128Or, kArm64S128Or) \
+ V(S128Xor, kArm64S128Xor)
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kArm64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kArm64##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kArm64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64S128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
+
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle. If
+// successful, writes the 32x4 shuffle indices.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ for (int i = 0; i < 4; i++) {
+ if (shuffle[i * 4] % 4 != 0) return false;
+ for (int j = 1; j < 4; j++) {
+ if (shuffle[i * 4 + j] - shuffle[i * 4 + j - 1] != 1) return false;
+ }
+ shuffle32x4[i] = shuffle[i * 4] / 4;
+ }
+ return true;
+}
+
+// Tries to match byte shuffle to concatenate (vext) operation. If successful,
+// writes the vext immediate value.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* vext) {
+ uint8_t start = shuffle[0];
+ int i = 1;
+ for (; i < 16 - start; i++) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = 16;
+ for (; i < 16; i++, wrap++) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *vext = start;
+ return true;
+}
+
+namespace {
+
+static const int kShuffleLanes = 16;
+static const int kMaxLaneIndex = 15;
+static const int kMaxShuffleIndex = 31;
+
+struct ShuffleEntry {
+ uint8_t shuffle[kShuffleLanes];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kArm64S32x4ZipLeft},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArm64S32x4ZipRight},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kArm64S32x4UnzipLeft},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kArm64S32x4UnzipRight},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kArm64S32x4TransposeLeft},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kArm64S32x4TransposeRight},
+ {{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11},
+ kArm64S32x2Reverse},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kArm64S16x8ZipLeft},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kArm64S16x8ZipRight},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kArm64S16x8UnzipLeft},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kArm64S16x8UnzipRight},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kArm64S16x8TransposeLeft},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kArm64S16x8TransposeRight},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kArm64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kArm64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kArm64S8x16ZipLeft},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kArm64S8x16ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kArm64S8x16UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kArm64S8x16UnzipRight},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kArm64S8x16TransposeLeft},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kArm64S8x16TransposeRight},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArm64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArm64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kArm64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; i++) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kShuffleLanes; j++) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kShuffleLanes) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = kMaxShuffleIndex;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kMaxLaneIndex;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < 16; i++) {
+ if (shuffle[i] < 16) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kMaxLaneIndex;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kMaxLaneIndex;
+ }
+ }
+ return mask;
+}
+
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; i--) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
+}
+
+void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
+ InstructionOperand* src0, InstructionOperand* src1) {
+ if (input0 == input1) {
+ // Unary, any q-register can be the table.
+ *src0 = *src1 = g->UseRegister(input0);
+ } else {
+ // Binary, table registers must be consecutive.
+ *src0 = g->UseFixed(input0, fp_fixed2);
+ *src1 = g->UseFixed(input1, fp_fixed3);
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ Arm64OperandGenerator g(this);
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t bias;
+ if (TryMatchConcat(shuffle, mask, &bias)) {
+ Emit(kArm64S8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(bias));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kArm64S32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ // Code generator uses vtbl, arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArm64S8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
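VisitS8x16Shuffle above tries progressively cheaper encodings: a direct match against arch_shuffles, then an EXT-style concat, then a 32x4 word shuffle, and finally the generic table-lookup path. The driver below is illustrative (not part of the patch); it reuses TryMatchConcat and Pack4Lanes verbatim from the hunk to show a unary rotate-by-four-bytes shuffle collapsing to a concat immediate of 4, and four lane indices packing into one int32 immediate:

#include <cstdint>
#include <cstdio>

bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* vext) {
  uint8_t start = shuffle[0];
  int i = 1;
  for (; i < 16 - start; i++) {
    if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
  }
  uint8_t wrap = 16;
  for (; i < 16; i++, wrap++) {
    if ((shuffle[i] & mask) != (wrap & mask)) return false;
  }
  *vext = start;
  return true;
}

int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; i--) {
    result <<= 8;
    result |= shuffle[i] & mask;
  }
  return result;
}

int main() {
  // Unary shuffle (both inputs are the same node), so the mask is 15.
  const uint8_t rotate4[16] = {4,  5,  6,  7,  8, 9, 10, 11,
                               12, 13, 14, 15, 0, 1, 2,  3};
  uint8_t vext = 0;
  if (TryMatchConcat(rotate4, 15, &vext)) {
    std::printf("concat immediate = %d\n", vext);  // prints 4
  }
  const uint8_t identity32x4[4] = {0, 1, 2, 3};
  std::printf("packed lanes = 0x%08x\n", Pack4Lanes(identity32x4, 31));
  // prints 0x03020100: lane 0 lands in the least significant byte.
  return 0;
}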
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index fd2209ed53..18854dfebe 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -11,7 +11,6 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -34,13 +33,6 @@ class AstGraphBuilder::AstContext BASE_EMBEDDED {
bool IsValue() const { return kind_ == Expression::kValue; }
bool IsTest() const { return kind_ == Expression::kTest; }
- // Determines how to combine the frame state with the value
- // that is about to be plugged into this AstContext.
- OutputFrameStateCombine GetStateCombine() {
- return IsEffect() ? OutputFrameStateCombine::Ignore()
- : OutputFrameStateCombine::Push();
- }
-
// Plug a node into this expression context. Call this function in tail
// position in the Visit functions for expressions.
virtual void ProduceValue(Expression* expr, Node* value) = 0;
@@ -97,14 +89,11 @@ class AstGraphBuilder::AstValueContext final : public AstContext {
// Context to evaluate expression for a condition value (and side effects).
class AstGraphBuilder::AstTestContext final : public AstContext {
public:
- AstTestContext(AstGraphBuilder* owner, TypeFeedbackId feedback_id)
- : AstContext(owner, Expression::kTest), feedback_id_(feedback_id) {}
+ explicit AstTestContext(AstGraphBuilder* owner)
+ : AstContext(owner, Expression::kTest) {}
~AstTestContext() final;
void ProduceValue(Expression* expr, Node* value) final;
Node* ConsumeValue() final;
-
- private:
- TypeFeedbackId const feedback_id_;
};
@@ -286,12 +275,7 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
- state_values_cache_(jsgraph),
- liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
- false, local_zone),
- frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
- info->scope()->num_stack_slots(), info->shared_info())) {
+ state_values_cache_(jsgraph) {
InitializeAstVisitor(info->isolate());
}
@@ -401,10 +385,6 @@ bool AstGraphBuilder::CreateGraph(bool stack_check) {
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
- // Compute local variable liveness information and use it to relax
- // frame states.
- ClearNonLiveSlotsInFrameStates();
-
// Failures indicated by stack overflow.
return !HasStackOverflow();
}
@@ -431,8 +411,7 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build a stack-check before the body.
if (stack_check) {
- Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, BailoutId::FunctionEntry());
+ NewNode(javascript()->StackCheck());
}
// Visit statements in the function body.
@@ -443,33 +422,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
}
-void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
- if (!FLAG_analyze_environment_liveness ||
- !info()->is_deoptimization_enabled()) {
- return;
- }
-
- NonLiveFrameStateSlotReplacer replacer(
- &state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), false, local_zone());
- Variable* arguments = info()->scope()->arguments();
- if (arguments != nullptr && arguments->IsStackAllocated()) {
- replacer.MarkPermanentlyLive(arguments->index());
- }
- liveness_analyzer()->Run(&replacer);
- if (FLAG_trace_environment_liveness) {
- OFStream os(stdout);
- liveness_analyzer()->Print(os);
- }
-}
-
-
-// Gets the bailout id just before reading a variable proxy, but only for
-// unallocated variables.
-static BailoutId BeforeId(VariableProxy* proxy) {
- return proxy->var()->IsUnallocated() ? proxy->BeforeId() : BailoutId::None();
-}
-
static const char* GetDebugParameterName(Zone* zone, DeclarationScope* scope,
int index) {
#if DEBUG
@@ -490,9 +442,6 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
: builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
- liveness_block_(IsLivenessAnalysisEnabled()
- ? builder_->liveness_analyzer()->NewBlock()
- : nullptr),
values_(builder_->local_zone()),
contexts_(builder_->local_zone()),
control_dependency_(control_dependency),
@@ -527,13 +476,10 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
values()->insert(values()->end(), locals_count(), undefined_constant);
}
-
-AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy,
- LivenessAnalyzerBlock* liveness_block)
+AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy)
: builder_(copy->builder_),
parameters_count_(copy->parameters_count_),
locals_count_(copy->locals_count_),
- liveness_block_(liveness_block),
values_(copy->zone()),
contexts_(copy->zone()),
control_dependency_(copy->control_dependency_),
@@ -559,10 +505,6 @@ void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
} else {
DCHECK(variable->IsStackLocal());
values()->at(variable->index() + parameters_count_) = node;
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->Bind(variable->index());
- }
}
}
@@ -575,25 +517,11 @@ Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
return values()->at(variable->index() + 1);
} else {
DCHECK(variable->IsStackLocal());
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->Lookup(variable->index());
- }
return values()->at(variable->index() + parameters_count_);
}
}
-void AstGraphBuilder::Environment::MarkAllLocalsLive() {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- for (int i = 0; i < locals_count_; i++) {
- liveness_block()->Lookup(i);
- }
- }
-}
-
-
void AstGraphBuilder::Environment::RawParameterBind(int index, Node* node) {
DCHECK_LT(index, parameters_count());
values()->at(index) = node;
@@ -608,37 +536,24 @@ Node* AstGraphBuilder::Environment::RawParameterLookup(int index) {
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyForConditional() {
- LivenessAnalyzerBlock* copy_liveness_block = nullptr;
- if (liveness_block() != nullptr) {
- copy_liveness_block =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, copy_liveness_block);
+ return new (zone()) Environment(this);
}
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAsUnreachable() {
- Environment* env = new (zone()) Environment(this, nullptr);
+ Environment* env = new (zone()) Environment(this);
env->MarkAsUnreachable();
return env;
}
AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForOsrEntry() {
- LivenessAnalyzerBlock* copy_block =
- liveness_block() == nullptr ? nullptr
- : builder_->liveness_analyzer()->NewBlock();
- return new (zone()) Environment(this, copy_block);
+ return new (zone()) Environment(this);
}
AstGraphBuilder::Environment*
AstGraphBuilder::Environment::CopyAndShareLiveness() {
- if (liveness_block() != nullptr) {
- // Finish the current liveness block before copying.
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- Environment* env = new (zone()) Environment(this, liveness_block());
+ Environment* env = new (zone()) Environment(this);
return env;
}
@@ -657,63 +572,6 @@ AstGraphBuilder::Environment* AstGraphBuilder::Environment::CopyForLoop(
}
-void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
- int offset, int count) {
- bool should_update = false;
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
- if (*state_values == nullptr || (*state_values)->InputCount() != count) {
- should_update = true;
- } else {
- DCHECK(static_cast<size_t>(offset + count) <= values()->size());
- for (int i = 0; i < count; i++) {
- if ((*state_values)->InputAt(i) != env_values[i]) {
- should_update = true;
- break;
- }
- }
- }
- if (should_update) {
- const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
- (*state_values) = graph()->NewNode(op, count, env_values);
- }
-}
-
-
-Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
- OutputFrameStateCombine combine,
- bool owner_has_exception) {
- if (!builder()->info()->is_deoptimization_enabled()) {
- return builder()->GetEmptyFrameState();
- }
-
- UpdateStateValues(&parameters_node_, 0, parameters_count());
- UpdateStateValues(&locals_node_, parameters_count(), locals_count());
- UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
- stack_height());
-
- const Operator* op = common()->FrameState(
- ast_id, combine, builder()->frame_state_function_info());
-
- Node* result = graph()->NewNode(op, parameters_node_, locals_node_,
- stack_node_, builder()->current_context(),
- builder()->GetFunctionClosure(),
- builder()->graph()->start());
-
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- // If the owning node has an exception, register the checkpoint to the
- // predecessor so that the checkpoint is used for both the normal and the
- // exceptional paths. Yes, this is a terrible hack and we might want
- // to use an explicit frame state for the exceptional path.
- if (owner_has_exception) {
- liveness_block()->GetPredecessor()->Checkpoint(result);
- } else {
- liveness_block()->Checkpoint(result);
- }
- }
- return result;
-}
-
void AstGraphBuilder::Environment::PrepareForLoopExit(
Node* loop, BitVector* assigned_variables) {
if (IsMarkedAsUnreachable()) return;
@@ -743,17 +601,6 @@ void AstGraphBuilder::Environment::PrepareForLoopExit(
UpdateEffectDependency(effect_rename);
}
-bool AstGraphBuilder::Environment::IsLivenessAnalysisEnabled() {
- return FLAG_analyze_environment_liveness &&
- builder()->info()->is_deoptimization_enabled();
-}
-
-
-bool AstGraphBuilder::Environment::IsLivenessBlockConsistent() {
- return (!IsLivenessAnalysisEnabled() || IsMarkedAsUnreachable()) ==
- (liveness_block() == nullptr);
-}
-
AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
Expression::Context kind)
@@ -787,19 +634,16 @@ AstGraphBuilder::AstTestContext::~AstTestContext() {
void AstGraphBuilder::AstEffectContext::ProduceValue(Expression* expr,
Node* value) {
// The value is ignored.
- owner()->PrepareEagerCheckpoint(expr->id());
}
void AstGraphBuilder::AstValueContext::ProduceValue(Expression* expr,
Node* value) {
environment()->Push(value);
- owner()->PrepareEagerCheckpoint(expr->id());
}
void AstGraphBuilder::AstTestContext::ProduceValue(Expression* expr,
Node* value) {
- environment()->Push(owner()->BuildToBoolean(value, feedback_id_));
- owner()->PrepareEagerCheckpoint(expr->id());
+ environment()->Push(owner()->BuildToBoolean(value));
}
@@ -906,7 +750,7 @@ void AstGraphBuilder::VisitForEffect(Expression* expr) {
void AstGraphBuilder::VisitForTest(Expression* expr) {
- AstTestContext for_condition(this, expr->test_id());
+ AstTestContext for_condition(this);
if (!CheckStackOverflow()) {
VisitNoStackOverflowCheck(expr);
} else {
@@ -1133,7 +977,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -1148,7 +992,7 @@ void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
- VisitIterationBody(stmt, &while_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
while_loop.EndLoop();
}
@@ -1165,7 +1009,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
for_loop.BreakUnless(jsgraph()->TrueConstant());
}
- VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
+ VisitIterationBody(stmt, &for_loop);
for_loop.EndBody();
VisitIfNotNull(stmt->next());
for_loop.EndLoop();
@@ -1251,9 +1095,7 @@ void AstGraphBuilder::VisitConditional(Conditional* expr) {
void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
VectorSlotPair pair = CreateVectorSlotPair(expr->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(expr));
- Node* value = BuildVariableLoad(expr->var(), expr->id(), pair,
- ast_context()->GetStateCombine());
+ Node* value = BuildVariableLoad(expr->var(), pair);
ast_context()->ProduceValue(expr, value);
}
@@ -1272,7 +1114,6 @@ void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
expr->pattern(), expr->flags(),
FeedbackVector::GetIndex(expr->literal_slot()));
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, literal);
}
@@ -1285,8 +1126,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
FeedbackVector::GetIndex(expr->literal_slot()), expr->properties_count());
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
// The object is expected on the operand stack during computation of the
// property values and is the value of the entire expression.
@@ -1319,9 +1158,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(0));
- Node* store = BuildNamedStoreOwn(literal, name, value, feedback);
- PrepareFrameState(store, key->id(),
- OutputFrameStateCombine::Ignore());
+ BuildNamedStoreOwn(literal, name, value, feedback);
BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
@@ -1337,9 +1174,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
Node* language = jsgraph()->Constant(SLOPPY);
const Operator* op = javascript()->CallRuntime(Runtime::kSetProperty);
- Node* set_property = NewNode(op, receiver, key, value, language);
- // SetProperty should not lazy deopt on an object literal.
- PrepareFrameState(set_property, BailoutId::None());
+ NewNode(op, receiver, key, value, language);
BuildSetHomeObject(value, receiver, property);
}
break;
@@ -1352,22 +1187,18 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
DCHECK(property->emit_store());
const Operator* op =
javascript()->CallRuntime(Runtime::kInternalSetPrototype);
- Node* set_prototype = NewNode(op, receiver, value);
- // SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
+ NewNode(op, receiver, value);
break;
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1388,8 +1219,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* attr = jsgraph()->Constant(NONE);
const Operator* op =
javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
- Node* call = NewNode(op, literal, name, getter, setter, attr);
- PrepareFrameState(call, it->second->bailout_id);
+ NewNode(op, literal, name, getter, setter, attr);
}
ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -1414,8 +1244,6 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
FeedbackVector::GetIndex(expr->literal_slot()), expr->values()->length());
Node* literal = NewNode(op, closure);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
// The array is expected on the operand stack during computation of the
// element values.
@@ -1434,9 +1262,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(array_index);
Node* literal = environment()->Top();
- Node* store = BuildKeyedStore(literal, index, value, pair);
- PrepareFrameState(store, expr->GetIdForElement(array_index),
- OutputFrameStateCombine::Ignore());
+ BuildKeyedStore(literal, index, value, pair);
}
ast_context()->ProduceValue(expr, environment()->Pop());
@@ -1448,19 +1274,11 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
- bool needs_frame_state_before = true;
// Evaluate LHS expression.
switch (assign_type) {
- case VARIABLE: {
- Variable* variable = expr->target()->AsVariableProxy()->var();
- if (variable->location() == VariableLocation::PARAMETER ||
- variable->location() == VariableLocation::LOCAL ||
- variable->location() == VariableLocation::CONTEXT) {
- needs_frame_state_before = false;
- }
+ case VARIABLE:
break;
- }
case NAMED_PROPERTY:
VisitForValue(property->obj());
break;
@@ -1483,9 +1301,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VariableProxy* proxy = expr->target()->AsVariableProxy();
VectorSlotPair pair =
CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
- OutputFrameStateCombine::Push());
+ old_value = BuildVariableLoad(proxy->var(), pair);
break;
}
case NAMED_PROPERTY: {
@@ -1494,8 +1310,6 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1504,8 +1318,6 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1517,15 +1329,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VisitForValue(expr->value());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value =
- BuildBinaryOp(left, right, expr->binary_op(),
- expr->binary_operation()->BinaryOperationFeedbackId());
- PrepareFrameState(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push());
+ Node* value = BuildBinaryOp(left, right, expr->binary_op());
environment()->Push(value);
- if (needs_frame_state_before) {
- PrepareEagerCheckpoint(expr->binary_operation()->id());
- }
} else {
VisitForValue(expr->value());
}
@@ -1536,24 +1341,19 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
- BuildVariableAssignment(variable, value, expr->op(), feedback, expr->id(),
- ast_context()->GetStateCombine());
+ BuildVariableAssignment(variable, value, expr->op(), feedback);
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildNamedStore(object, name, value, feedback);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildKeyedStore(object, key, value, feedback);
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1565,16 +1365,25 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
ast_context()->ProduceValue(expr, value);
}
-void AstGraphBuilder::VisitSuspend(Suspend* expr) {
+void AstGraphBuilder::VisitYield(Yield* expr) {
// Generator functions are supported only by going through Ignition first.
UNREACHABLE();
}
+void AstGraphBuilder::VisitYieldStar(YieldStar* expr) {
+ // Generator functions are supported only by going through Ignition first.
+ UNREACHABLE();
+}
+
+void AstGraphBuilder::VisitAwait(Await* expr) {
+ // Generator functions are supported only by going through Ignition first.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- Node* value = BuildThrowError(exception, expr->id());
+ Node* value = BuildThrowError(exception);
ast_context()->ProduceValue(expr, value);
}
@@ -1592,7 +1401,6 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
Node* object = environment()->Pop();
Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case KEYED_PROPERTY: {
@@ -1601,7 +1409,6 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1627,9 +1434,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
case Call::GLOBAL_CALL: {
VariableProxy* proxy = callee->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
+ callee_value = BuildVariableLoad(proxy->var(), pair);
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
break;
@@ -1642,8 +1447,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
Node* object = environment()->Top();
callee_value = BuildNamedLoad(object, name, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -1660,8 +1463,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* key = environment()->Pop();
Node* object = environment()->Top();
callee_value = BuildKeyedLoad(object, key, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
// Note that a property call requires the receiver to be wrapped into
// an object for sloppy callees. However the receiver is guaranteed
// not to be null or undefined at this point.
@@ -1694,17 +1495,9 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to perform the function call.
CallFrequency frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
- const Operator* call =
- javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
- expr->tail_call_mode());
- PrepareEagerCheckpoint(expr->CallId());
+ const Operator* call = javascript()->Call(args->length() + 2, frequency,
+ feedback, receiver_hint);
Node* value = ProcessArguments(call, args->length() + 2);
- // The callee passed to the call, we just need to push something here to
- // satisfy the bailout location contract. The fullcodegen code will not
- // ever look at this value, so we just push optimized_out here.
- environment()->Push(jsgraph()->OptimizedOutConstant());
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- environment()->Drop(1);
ast_context()->ProduceValue(expr, value);
}
@@ -1725,7 +1518,6 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
const Operator* call =
javascript()->Construct(args->length() + 2, frequency, feedback);
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
ast_context()->ProduceValue(expr, value);
}
@@ -1745,9 +1537,7 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// Create node to perform the JS runtime call.
const Operator* call = javascript()->Call(args->length() + 2);
- PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -1766,12 +1556,7 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// Create node to perform the runtime call.
Runtime::FunctionId functionId = expr->function()->function_id;
const Operator* call = javascript()->CallRuntime(functionId, args->length());
- if (expr->function()->intrinsic_type == Runtime::IntrinsicType::RUNTIME ||
- expr->function()->function_id == Runtime::kInlineCall) {
- PrepareEagerCheckpoint(expr->CallId());
- }
Node* value = ProcessArguments(call, args->length());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -1812,9 +1597,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- old_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
+ old_value = BuildVariableLoad(proxy->var(), pair);
stack_depth = 0;
break;
}
@@ -1825,8 +1608,6 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildNamedLoad(object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
stack_depth = 1;
break;
}
@@ -1838,8 +1619,6 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
old_value = BuildKeyedLoad(object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
stack_depth = 2;
break;
}
@@ -1851,12 +1630,9 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// Convert old value into a number.
old_value = NewNode(javascript()->ToNumber(), old_value);
- PrepareFrameState(old_value, expr->ToNumberId(),
- OutputFrameStateCombine::Push());
// Create a proper eager frame state for the stores.
environment()->Push(old_value);
- PrepareEagerCheckpoint(expr->ToNumberId());
old_value = environment()->Pop();
// Save result for postfix expressions at correct stack depth.
@@ -1869,10 +1645,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
// Create node to perform +1/-1 operation.
- Node* value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
- expr->binary_op(), expr->CountBinOpFeedbackId());
- // This should never lazy deopt because we have converted to number before.
- PrepareFrameState(value, BailoutId::None());
+ Node* value =
+ BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
// Store the value.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CountSlot());
@@ -1880,25 +1654,20 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
environment()->Push(value);
- BuildVariableAssignment(variable, value, expr->op(), feedback,
- expr->AssignmentId());
+ BuildVariableAssignment(variable, value, expr->op(), feedback);
environment()->Pop();
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedStore(object, name, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildNamedStore(object, name, value, feedback);
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = BuildKeyedStore(object, key, value, feedback);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ BuildKeyedStore(object, key, value, feedback);
break;
}
case NAMED_SUPER_PROPERTY:
@@ -1926,9 +1695,7 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
VisitForValue(expr->right());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
- Node* value = BuildBinaryOp(left, right, expr->op(),
- expr->BinaryOperationFeedbackId());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ Node* value = BuildBinaryOp(left, right, expr->op());
ast_context()->ProduceValue(expr, value);
}
}
@@ -1951,7 +1718,6 @@ void AstGraphBuilder::VisitLiteralCompareNil(CompareOperation* expr,
VisitForValue(sub_expr);
Node* value_to_compare = environment()->Pop();
Node* value = NewNode(op, value_to_compare, nil_value);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(expr, value);
}
@@ -1962,7 +1728,6 @@ void AstGraphBuilder::VisitLiteralCompareTypeof(CompareOperation* expr,
Node* typeof_arg = NewNode(javascript()->TypeOf(), environment()->Pop());
Node* value = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
typeof_arg, jsgraph()->Constant(check));
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
return ast_context()->ProduceValue(expr, value);
}
@@ -2020,7 +1785,6 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = NewNode(op, left, right);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(expr, value);
}
@@ -2083,8 +1847,7 @@ void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
Node* decls = jsgraph()->Constant(data);
Node* vector = jsgraph()->Constant(feedback_vector);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
- Node* call = NewNode(op, decls, flags, vector);
- PrepareFrameState(call, BailoutId::Declarations());
+ NewNode(op, decls, flags, vector);
globals()->clear();
}
@@ -2094,13 +1857,10 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
Visit(stmt);
}
-
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop,
- BailoutId stack_check_id) {
+ LoopBuilder* loop) {
ControlScopeForIteration scope(this, stmt, loop);
- Node* node = NewNode(javascript()->StackCheck());
- PrepareFrameState(node, stack_check_id);
+ NewNode(javascript()->StackCheck());
Visit(stmt->body());
}
@@ -2112,8 +1872,7 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
// "delete this" is allowed.
Variable* variable = expr->expression()->AsVariableProxy()->var();
DCHECK(is_sloppy(language_mode()) || variable->is_this());
- value = BuildVariableDelete(variable, expr->id(),
- ast_context()->GetStateCombine());
+ value = BuildVariableDelete(variable);
} else if (expr->expression()->IsProperty()) {
Property* property = expr->expression()->AsProperty();
VisitForValue(property->obj());
@@ -2122,7 +1881,6 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
Node* object = environment()->Pop();
Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
value = NewNode(javascript()->DeleteProperty(), object, key, mode);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
value = jsgraph()->TrueConstant();
@@ -2143,10 +1901,7 @@ void AstGraphBuilder::VisitTypeofExpression(Expression* expr) {
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- Node* load =
- BuildVariableLoad(proxy->var(), expr->id(), pair,
- OutputFrameStateCombine::Push(), INSIDE_TYPEOF);
+ Node* load = BuildVariableLoad(proxy->var(), pair, INSIDE_TYPEOF);
environment()->Push(load);
} else {
VisitForValue(expr);
@@ -2194,7 +1949,7 @@ void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
if (ast_context()->IsValue()) {
VisitForValue(expr->left());
Node* left = environment()->Top();
- condition = BuildToBoolean(left, expr->left()->test_id());
+ condition = BuildToBoolean(left);
} else {
VisitForTest(expr->left());
condition = environment()->Top();
@@ -2317,8 +2072,6 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
Handle<ScopeInfo> scope_info = scope->scope_info();
const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* local_context = NewNode(op, GetFunctionClosure());
- PrepareFrameState(local_context, BailoutId::ScriptContext(),
- OutputFrameStateCombine::Push());
return local_context;
}
@@ -2346,26 +2099,23 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
: CreateArgumentsType::kMappedArguments;
const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
- PrepareFrameState(object, BailoutId::None());
// Assign the object to the {arguments} variable. This should never lazy
// deopt, so it is fine to send invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None());
+ BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair());
return object;
}
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
- Node* not_hole,
- BailoutId bailout_id) {
+ Node* not_hole) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
value, the_hole);
hole_check.If(check);
hole_check.Then();
- Node* error = BuildThrowReferenceError(variable, bailout_id);
+ Node* error = BuildThrowReferenceError(variable);
environment()->Push(error);
hole_check.Else();
environment()->Push(not_hole);
@@ -2373,10 +2123,8 @@ Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
return environment()->Pop();
}
-
Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
- Node* for_hole,
- BailoutId bailout_id) {
+ Node* for_hole) {
IfBuilder hole_check(this);
Node* the_hole = jsgraph()->TheHoleConstant();
Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
@@ -2385,16 +2133,14 @@ Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
hole_check.Then();
environment()->Push(for_hole);
hole_check.Else();
- Node* error = BuildThrowReferenceError(variable, bailout_id);
+ Node* error = BuildThrowReferenceError(variable);
environment()->Push(error);
hole_check.End();
return environment()->Pop();
}
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
- BailoutId bailout_id,
const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
TypeofMode typeof_mode) {
Node* the_hole = jsgraph()->TheHoleConstant();
switch (variable->location()) {
@@ -2403,7 +2149,6 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
Handle<Name> name = variable->name();
if (Node* node = TryLoadGlobalConstant(name)) return node;
Node* value = BuildGlobalLoad(name, feedback, typeof_mode);
- PrepareFrameState(value, bailout_id, combine);
return value;
}
case VariableLocation::PARAMETER:
@@ -2413,9 +2158,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
if (value->op() == the_hole->op()) {
- value = BuildThrowReferenceError(variable, bailout_id);
+ value = BuildThrowReferenceError(variable);
} else if (value->opcode() == IrOpcode::kPhi) {
- value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value);
}
}
return value;
@@ -2436,7 +2181,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
// Maybe specializer should be a parameter to the graph builder?
if (variable->binding_needs_init()) {
// Perform check for uninitialized let/const variables.
- value = BuildHoleCheckThenThrow(value, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(value, variable, value);
}
return value;
}
@@ -2445,13 +2190,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
-
-Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
- BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
// Global var, const, or let variable.
@@ -2460,7 +2201,6 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
const Operator* op = javascript()->DeleteProperty();
Node* result = NewNode(op, global, name, mode);
- PrepareFrameState(result, bailout_id, combine);
return result;
}
case VariableLocation::PARAMETER:
@@ -2474,13 +2214,11 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
-Node* AstGraphBuilder::BuildVariableAssignment(
- Variable* variable, Node* value, Token::Value op,
- const VectorSlotPair& feedback, BailoutId bailout_id,
- OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+ Token::Value op,
+ const VectorSlotPair& feedback) {
Node* the_hole = jsgraph()->TheHoleConstant();
VariableMode mode = variable->mode();
switch (variable->location()) {
@@ -2488,7 +2226,6 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Global var, const, or let variable.
Handle<Name> name = variable->name();
Node* store = BuildGlobalStore(name, value, feedback);
- PrepareFrameState(store, bailout_id, combine);
return store;
}
case VariableLocation::PARAMETER:
@@ -2505,9 +2242,9 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Perform an initialization check for let declared variables.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- return BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable);
} else if (current->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
@@ -2515,7 +2252,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// to trigger bind operations outside the TDZ, via {super} calls.
Node* current = environment()->Lookup(variable);
if (current->op() != the_hole->op() && variable->is_this()) {
- value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckElseThrow(current, variable, value);
}
} else if (mode == CONST && op != Token::INIT &&
variable->is_sloppy_function_name()) {
@@ -2524,20 +2261,20 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// - ignored in sloppy mode.
DCHECK(!variable->binding_needs_init());
if (variable->throw_on_const_assignment(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
return value;
} else if (mode == CONST && op != Token::INIT) {
if (variable->binding_needs_init()) {
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
- return BuildThrowReferenceError(variable, bailout_id);
+ return BuildThrowReferenceError(variable);
} else if (current->opcode() == IrOpcode::kPhi) {
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
}
       // Assignment to const is an exception in all modes.
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
environment()->Bind(variable, value);
return value;
@@ -2549,7 +2286,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckThenThrow(current, variable, value);
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
// Note that the {this} variable is the only const variable being able
@@ -2558,7 +2295,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
+ value = BuildHoleCheckElseThrow(current, variable, value);
}
} else if (mode == CONST && op != Token::INIT &&
variable->is_sloppy_function_name()) {
@@ -2567,7 +2304,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// - ignored in sloppy mode.
DCHECK(!variable->binding_needs_init());
if (variable->throw_on_const_assignment(language_mode())) {
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
return value;
} else if (mode == CONST && op != Token::INIT) {
@@ -2575,10 +2312,10 @@ Node* AstGraphBuilder::BuildVariableAssignment(
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
Node* current = NewNode(op);
- BuildHoleCheckThenThrow(current, variable, value, bailout_id);
+ BuildHoleCheckThenThrow(current, variable, value);
}
       // Assignment to const is an exception in all modes.
- return BuildThrowConstAssignError(bailout_id);
+ return BuildThrowConstAssignError();
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
return NewNode(op, value);
@@ -2588,7 +2325,6 @@ Node* AstGraphBuilder::BuildVariableAssignment(
UNREACHABLE();
}
UNREACHABLE();
- return nullptr;
}
@@ -2671,20 +2407,12 @@ Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
return result;
}
-
-Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
+Node* AstGraphBuilder::BuildToBoolean(Node* input) {
if (Node* node = TryFastToBoolean(input)) return node;
ToBooleanHints hints = ToBooleanHint::kAny;
return NewNode(javascript()->ToBoolean(hints), input);
}
-
-Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
- Node* object = NewNode(javascript()->ToObject(), input);
- PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
- return object;
-}
-
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
LiteralProperty* property,
int slot_number) {
@@ -2694,39 +2422,30 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
VectorSlotPair feedback =
CreateVectorSlotPair(property->GetSlot(slot_number));
Node* store = BuildNamedStore(value, name, home_object, feedback);
- PrepareFrameState(store, BailoutId::None(),
- OutputFrameStateCombine::Ignore());
return store;
}
-
-Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowError(Node* exception) {
const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
Node* call = NewNode(op, exception);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
-
-Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
- BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
Node* variable_name = jsgraph()->Constant(variable->name());
const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
Node* call = NewNode(op, variable_name);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
}
-
-Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
+Node* AstGraphBuilder::BuildThrowConstAssignError() {
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowConstAssignError);
Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw());
UpdateControlDependencyToLeaveFunction(control);
return call;
@@ -2753,9 +2472,7 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
return control;
}
-
-Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
- TypeFeedbackId feedback_id) {
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
const Operator* js_op;
BinaryOperationHint hint = BinaryOperationHint::kAny;
switch (op) {
@@ -2837,7 +2554,6 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
- DCHECK_EQ(-1, info()->osr_expr_stack_height());
info()->set_osr_expr_stack_height(environment()->stack_height());
return true;
}
@@ -2845,35 +2561,6 @@ bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
}
-void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- DCHECK(ast_id.IsNone() || info()->shared_info()->VerifyBailoutId(ast_id));
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- bool has_exception = NodeProperties::IsExceptionalCall(node);
- Node* state = environment()->Checkpoint(ast_id, combine, has_exception);
- NodeProperties::ReplaceFrameStateInput(node, state);
- }
-}
-
-void AstGraphBuilder::PrepareEagerCheckpoint(BailoutId ast_id) {
- if (environment()->GetEffectDependency()->opcode() == IrOpcode::kCheckpoint) {
-    // We skip preparing a checkpoint if there already is one on the current effect
-    // dependency. This is just an optimization and not needed for correctness.
- return;
- }
- if (ast_id != BailoutId::None()) {
- DCHECK(info()->shared_info()->VerifyBailoutId(ast_id));
- Node* node = NewNode(common()->Checkpoint());
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node)->opcode());
- Node* state = environment()->Checkpoint(ast_id);
- NodeProperties::ReplaceFrameStateInput(node, state);
- }
-}
-
BitVector* AstGraphBuilder::GetVariablesAssignedInLoop(
IterationStatement* stmt) {
if (loop_assignment_analysis_ == nullptr) return nullptr;
@@ -2919,10 +2606,8 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
*current_input++ = current_context();
}
if (has_frame_state) {
- // The frame state will be inserted later. Here we misuse
- // the {Dead} node as a sentinel to be later overwritten
- // with the real frame state.
- *current_input++ = jsgraph()->Dead();
+ DCHECK(!info()->is_deoptimization_enabled());
+ *current_input++ = GetEmptyFrameState();
}
if (has_effect) {
*current_input++ = environment_->GetEffectDependency();
@@ -2971,24 +2656,9 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
effect_dependency_ = other->effect_dependency_;
values_ = other->values_;
contexts_ = other->contexts_;
- if (IsLivenessAnalysisEnabled()) {
- liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(other->liveness_block());
- }
return;
}
- // Record the merge for the local variable liveness calculation.
- // For loops, we are connecting a back edge into the existing block;
- // for merges, we create a new merged block.
- if (IsLivenessAnalysisEnabled()) {
- if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
- liveness_block_ =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- liveness_block()->AddPredecessor(other->liveness_block());
- }
-
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder_->MergeControl(this->GetControlDependency(),
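
The removals above all follow one pattern: with deoptimization disabled for this builder, the eager/lazy frame-state bookkeeping is dropped and the builders lose their BailoutId / OutputFrameStateCombine parameters. For reference, the pre-patch convention paired an eager checkpoint "before" a potentially deoptimizing operation with a lazy frame state attached "after" it; a minimal sketch assembled from the call sites deleted above (not new code):

  // Eager: a {Checkpoint} node wired into the effect chain, conceptually
  // "before" the operation.
  PrepareEagerCheckpoint(BeforeId(proxy));
  old_value = BuildVariableLoad(proxy->var(), expr->target()->id(), pair,
                                OutputFrameStateCombine::Push());
  // Lazy: a frame state attached to the node itself, conceptually "after"
  // the operation, replacing the {Dead} sentinel installed by MakeNode.
  Node* store = BuildNamedStore(object, name, value, feedback);
  PrepareFrameState(store, expr->AssignmentId(),
                    OutputFrameStateCombine::Push());

After the patch, MakeNode asserts that deoptimization is disabled and wires GetEmptyFrameState() directly, so neither helper is needed.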
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 1d0ba3a9c2..ad1f1eb3f7 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -8,7 +8,6 @@
#include "src/ast/ast.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
namespace v8 {
@@ -114,12 +113,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
- // Analyzer of local variable liveness.
- LivenessAnalyzer liveness_analyzer_;
-
- // Function info for frame state construction.
- const FrameStateFunctionInfo* const frame_state_function_info_;
-
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -140,10 +133,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
ZoneVector<Handle<Object>>* globals() { return &globals_; }
Scope* current_scope() const;
Node* current_context() const;
- LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
- const FrameStateFunctionInfo* frame_state_function_info() const {
- return frame_state_function_info_;
- }
void set_environment(Environment* env) { environment_ = env; }
void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
@@ -221,28 +210,12 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
- // Prepare information for lazy deoptimization. This information is attached
- // to the given node and the output value produced by the node is combined.
- // Conceptually this frame state is "after" a given operation.
- void PrepareFrameState(Node* node, BailoutId ast_id,
- OutputFrameStateCombine framestate_combine =
- OutputFrameStateCombine::Ignore());
-
- // Prepare information for eager deoptimization. This information is carried
- // by dedicated {Checkpoint} nodes that are wired into the effect chain.
- // Conceptually this frame state is "before" a given operation.
- void PrepareEagerCheckpoint(BailoutId ast_id);
-
BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
// Check if the given statement is an OSR entry.
// If so, record the stack height into the compilation and return {true}.
bool CheckOsrEntry(IterationStatement* stmt);
- // Computes local variable liveness and replaces dead variables in
- // frame states with the undefined values.
- void ClearNonLiveSlotsInFrameStates();
-
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
@@ -267,15 +240,9 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builders for variable load and assignment.
Node* BuildVariableAssignment(Variable* variable, Node* value,
- Token::Value op, const VectorSlotPair& slot,
- BailoutId bailout_id,
- OutputFrameStateCombine framestate_combine =
- OutputFrameStateCombine::Ignore());
- Node* BuildVariableDelete(Variable* variable, BailoutId bailout_id,
- OutputFrameStateCombine framestate_combine);
- Node* BuildVariableLoad(Variable* variable, BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine framestate_combine,
+ Token::Value op, const VectorSlotPair& slot);
+ Node* BuildVariableDelete(Variable* variable);
+ Node* BuildVariableLoad(Variable* variable, const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
// Builders for property loads and stores.
@@ -301,8 +268,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildLoadNativeContextField(int index);
// Builders for automatic type conversion.
- Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
- Node* BuildToObject(Node* input, BailoutId bailout_id);
+ Node* BuildToBoolean(Node* input);
// Builder for adding the [[HomeObject]] to a value if the value came from a
// function literal and needs a home object. Do nothing otherwise.
@@ -310,23 +276,20 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
LiteralProperty* property, int slot_number = 0);
// Builders for error reporting at runtime.
- Node* BuildThrowError(Node* exception, BailoutId bailout_id);
- Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
- Node* BuildThrowConstAssignError(BailoutId bailout_id);
+ Node* BuildThrowError(Node* exception);
+ Node* BuildThrowReferenceError(Variable* var);
+ Node* BuildThrowConstAssignError();
// Builders for dynamic hole-checks at runtime.
- Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
- BailoutId bailout_id);
- Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
- BailoutId bailout_id);
+ Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole);
+ Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole);
// Builders for non-local control flow.
Node* BuildReturn(Node* return_value);
Node* BuildThrow(Node* exception_value);
// Builders for binary operations.
- Node* BuildBinaryOp(Node* left, Node* right, Token::Value op,
- TypeFeedbackId feedback_id);
+ Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
@@ -364,8 +327,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
- void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop,
- BailoutId stack_check_id);
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
// Dispatched from VisitCall.
void VisitCallSuper(Call* expr);
@@ -426,7 +388,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Operations on parameter or local variables.
void Bind(Variable* variable, Node* node);
Node* Lookup(Variable* variable);
- void MarkAllLocalsLive();
// Raw operations on parameter variables.
void RawParameterBind(int index, Node* node);
@@ -476,12 +437,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
values()->erase(values()->end() - depth, values()->end());
}
- // Preserve a checkpoint of the environment for the IR graph. Any
- // further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
- OutputFrameStateCombine::Ignore(),
- bool node_has_exception = false);
-
// Inserts a loop exit control node and renames the environment.
// This is useful for loop peeling to insert phis at loop exits.
void PrepareForLoopExit(Node* loop, BitVector* assigned_variables);
@@ -501,7 +456,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Mark this environment as being unreachable.
void MarkAsUnreachable() {
UpdateControlDependency(builder()->jsgraph()->Dead());
- liveness_block_ = nullptr;
}
bool IsMarkedAsUnreachable() {
return GetControlDependency()->opcode() == IrOpcode::kDead;
@@ -528,7 +482,6 @@ class AstGraphBuilder::Environment : public ZoneObject {
AstGraphBuilder* builder_;
int parameters_count_;
int locals_count_;
- LivenessAnalyzerBlock* liveness_block_;
NodeVector values_;
NodeVector contexts_;
Node* control_dependency_;
@@ -537,19 +490,14 @@ class AstGraphBuilder::Environment : public ZoneObject {
Node* locals_node_;
Node* stack_node_;
- explicit Environment(Environment* copy,
- LivenessAnalyzerBlock* liveness_block);
+ explicit Environment(Environment* copy);
Environment* CopyAndShareLiveness();
- void UpdateStateValues(Node** state_values, int offset, int count);
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
AstGraphBuilder* builder() const { return builder_; }
CommonOperatorBuilder* common() { return builder_->common(); }
NodeVector* values() { return &values_; }
NodeVector* contexts() { return &contexts_; }
- LivenessAnalyzerBlock* liveness_block() { return liveness_block_; }
- bool IsLivenessAnalysisEnabled();
- bool IsLivenessBlockConsistent();
// Prepare environment to be used as loop header.
void PrepareForLoop(BitVector* assigned);
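
With the declarations above trimmed, the variable builders no longer carry deoptimization bookkeeping, and call sites shrink accordingly. A condensed sketch of the post-patch shape, taken from the rewritten call sites in ast-graph-builder.cc above:

  VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
  Node* old_value = BuildVariableLoad(proxy->var(), pair);
  BuildVariableAssignment(variable, value, expr->op(), feedback);
  Node* condition = BuildToBoolean(left);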
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index ff66bf4976..a6e5029573 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -149,11 +149,11 @@ void ALAA::VisitObjectLiteral(ObjectLiteral* e) {
void ALAA::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
-void ALAA::VisitSuspend(Suspend* stmt) {
- Visit(stmt->generator_object());
- Visit(stmt->expression());
-}
+void ALAA::VisitYield(Yield* e) { Visit(e->expression()); }
+
+void ALAA::VisitYieldStar(YieldStar* e) { Visit(e->expression()); }
+void ALAA::VisitAwait(Await* e) { Visit(e->expression()); }
void ALAA::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 40f0a29132..36ffcf1623 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -56,9 +56,9 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
BasicBlockProfiler::Data* data =
info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
// Set the function name.
- if (info->has_shared_info() && info->shared_info()->name()->IsString()) {
+ if (info->has_shared_info()) {
std::ostringstream os;
- String::cast(info->shared_info()->name())->PrintUC16(os);
+ info->shared_info()->name()->PrintUC16(os);
data->SetFunctionName(&os);
}
// Capture the schedule string before instrumentation.
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 96327e7856..b553adf333 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -113,8 +113,7 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
}
return Replace(dead());
}
- return UpdateConditions(
- node, conditions->AddCondition(zone_, condition, condition_is_true));
+ return UpdateConditions(node, conditions, condition, condition_is_true);
}
Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
@@ -128,8 +127,7 @@ Reduction BranchElimination::ReduceIf(Node* node, bool is_true_branch) {
return UpdateConditions(node, nullptr);
}
Node* condition = branch->InputAt(0);
- return UpdateConditions(
- node, from_branch->AddCondition(zone_, condition, is_true_branch));
+ return UpdateConditions(node, from_branch, condition, is_true_branch);
}
@@ -224,6 +222,25 @@ Reduction BranchElimination::UpdateConditions(
return NoChange();
}
+Reduction BranchElimination::UpdateConditions(
+ Node* node, const ControlPathConditions* prev_conditions,
+ Node* current_condition, bool is_true_branch) {
+ const ControlPathConditions* original = node_conditions_.Get(node);
+ DCHECK(prev_conditions != nullptr && current_condition != nullptr);
+ // The control path for the node is the path obtained by appending the
+ // current_condition to the prev_conditions. Check if this new control path
+ // would be the same as the already recorded path (original).
+ if (original == nullptr || !prev_conditions->EqualsAfterAddingCondition(
+ original, current_condition, is_true_branch)) {
+ // If this is the first visit or if the control path is different from the
+    // recorded path, create the new control path and record it.
+ const ControlPathConditions* new_condition =
+ prev_conditions->AddCondition(zone_, current_condition, is_true_branch);
+ node_conditions_.Set(node, new_condition);
+ return Changed(node);
+ }
+ return NoChange();
+}
// static
const BranchElimination::ControlPathConditions*
@@ -290,12 +307,8 @@ Maybe<bool> BranchElimination::ControlPathConditions::LookupCondition(
return Nothing<bool>();
}
-
-bool BranchElimination::ControlPathConditions::operator==(
- const ControlPathConditions& other) const {
- if (condition_count_ != other.condition_count_) return false;
- BranchCondition* this_condition = head_;
- BranchCondition* other_condition = other.head_;
+bool BranchElimination::ControlPathConditions::IsSamePath(
+ BranchCondition* this_condition, BranchCondition* other_condition) const {
while (true) {
if (this_condition == other_condition) return true;
if (this_condition->condition != other_condition->condition ||
@@ -306,7 +319,31 @@ bool BranchElimination::ControlPathConditions::operator==(
other_condition = other_condition->next;
}
UNREACHABLE();
- return false;
+}
+
+bool BranchElimination::ControlPathConditions::operator==(
+ const ControlPathConditions& other) const {
+ if (condition_count_ != other.condition_count_) return false;
+ return IsSamePath(head_, other.head_);
+}
+
+bool BranchElimination::ControlPathConditions::EqualsAfterAddingCondition(
+ const ControlPathConditions* other, const Node* new_condition,
+ bool new_branch_direction) const {
+ // When an extra condition is added to the current chain, the count of
+ // the resulting chain would increase by 1. Quick check to see if counts
+ // match.
+ if (other->condition_count_ != condition_count_ + 1) return false;
+
+  // Check if the head of the other chain is the same as the new condition that
+ // would be added.
+ if (other->head_->condition != new_condition ||
+ other->head_->is_true != new_branch_direction) {
+ return false;
+ }
+
+ // Check if the rest of the path is the same as the prev_condition.
+ return IsSamePath(other->head_->next, head_);
}
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
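
The new UpdateConditions overload exists to avoid allocating a fresh ControlPathConditions chain on every revisit of a node: because the conditions form a persistent singly linked list, "previous path plus one condition equals the recorded path" can be decided with a length check, a head check, and a walk over the (usually shared) tail. A simplified, self-contained restatement of that check follows; BranchCondition and Node are stand-ins for the real classes, and EqualsAfterAdding mirrors EqualsAfterAddingCondition above:

  #include <cstddef>

  class Node;

  struct BranchCondition {
    const Node* condition;
    bool is_true;
    BranchCondition* next;
  };

  // True iff {recorded} is exactly {prev} with (cond, is_true) prepended.
  bool EqualsAfterAdding(const BranchCondition* recorded, size_t recorded_count,
                         const BranchCondition* prev, size_t prev_count,
                         const Node* cond, bool is_true) {
    // Adding one condition grows the chain by exactly one entry.
    if (recorded_count != prev_count + 1) return false;
    // The head of the recorded chain must be the condition being added.
    if (recorded == nullptr || recorded->condition != cond ||
        recorded->is_true != is_true) {
      return false;
    }
    // The tails typically share the same allocation, so the pointer
    // comparison ends the walk immediately in the common case.
    const BranchCondition* a = recorded->next;
    const BranchCondition* b = prev;
    while (a != b) {
      if (a == nullptr || b == nullptr) return false;
      if (a->condition != b->condition || a->is_true != b->is_true) return false;
      a = a->next;
      b = b->next;
    }
    return true;
  }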
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index c1431523e5..d78933e734 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -23,6 +23,8 @@ class V8_EXPORT_PRIVATE BranchElimination final
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone);
~BranchElimination() final;
+ const char* reducer_name() const override { return "BranchElimination"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -47,6 +49,10 @@ class V8_EXPORT_PRIVATE BranchElimination final
static const ControlPathConditions* Empty(Zone* zone);
void Merge(const ControlPathConditions& other);
+ bool IsSamePath(BranchCondition* first, BranchCondition* second) const;
+ bool EqualsAfterAddingCondition(const ControlPathConditions* other,
+ const Node* new_condition,
+                                  bool new_branch_direction) const;
bool operator==(const ControlPathConditions& other) const;
bool operator!=(const ControlPathConditions& other) const {
return !(*this == other);
@@ -87,6 +93,9 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction TakeConditionsFromFirstControl(Node* node);
Reduction UpdateConditions(Node* node,
const ControlPathConditions* conditions);
+ Reduction UpdateConditions(Node* node,
+ const ControlPathConditions* prev_conditions,
+ Node* current_condition, bool is_true_branch);
Node* dead() const { return dead_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index e531e75b8c..13185db208 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -28,31 +28,17 @@ void BytecodeLoopAssignments::Add(interpreter::Register r) {
}
}
-void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+void BytecodeLoopAssignments::AddList(interpreter::Register r, uint32_t count) {
if (r.is_parameter()) {
- DCHECK(interpreter::Register(r.index() + 1).is_parameter());
- bit_vector_->Add(r.ToParameterIndex(parameter_count_));
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
- } else {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- bit_vector_->Add(parameter_count_ + r.index());
- bit_vector_->Add(parameter_count_ + r.index() + 1);
- }
-}
-
-void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
- if (r.is_parameter()) {
- DCHECK(interpreter::Register(r.index() + 1).is_parameter());
- DCHECK(interpreter::Register(r.index() + 2).is_parameter());
- bit_vector_->Add(r.ToParameterIndex(parameter_count_));
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
- bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+ for (uint32_t i = 0; i < count; i++) {
+ DCHECK(interpreter::Register(r.index() + i).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + i);
+ }
} else {
- DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
- DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
- bit_vector_->Add(parameter_count_ + r.index());
- bit_vector_->Add(parameter_count_ + r.index() + 1);
- bit_vector_->Add(parameter_count_ + r.index() + 2);
+ for (uint32_t i = 0; i < count; i++) {
+ DCHECK(!interpreter::Register(r.index() + i).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index() + i);
+ }
}
}
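
AddList subsumes the old fixed-arity helpers: a register list lives either entirely in the parameter area or entirely in the local area, and each of the {count} consecutive registers gets its bit set. As the later hunks in this file show, the former call sites reduce to the count-based form, roughly:

  assignments.AddList(accessor.GetRegisterOperand(i), 2);  // previously AddPair
  assignments.AddList(accessor.GetRegisterOperand(i), 3);  // previously AddTriple
  // Bytecodes with an explicit register-count operand pass it through directly:
  interpreter::Register r = accessor.GetRegisterOperand(i++);
  uint32_t reg_count = accessor.GetRegisterCountOperand(i);
  assignments.AddList(r, reg_count);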
@@ -112,6 +98,17 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
break;
}
+ case OperandType::kRegOutList: {
+ interpreter::Register r = accessor.GetRegisterOperand(i++);
+ uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ if (!r.is_parameter()) {
+ for (uint32_t j = 0; j < reg_count; ++j) {
+ DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+ in_liveness.MarkRegisterDead(r.index() + j);
+ }
+ }
+ break;
+ }
case OperandType::kRegOutPair: {
interpreter::Register r = accessor.GetRegisterOperand(i);
if (!r.is_parameter()) {
@@ -227,12 +224,18 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
assignments.Add(accessor.GetRegisterOperand(i));
break;
}
+ case OperandType::kRegOutList: {
+ interpreter::Register r = accessor.GetRegisterOperand(i++);
+ uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ assignments.AddList(r, reg_count);
+ break;
+ }
case OperandType::kRegOutPair: {
- assignments.AddPair(accessor.GetRegisterOperand(i));
+ assignments.AddList(accessor.GetRegisterOperand(i), 2);
break;
}
case OperandType::kRegOutTriple: {
- assignments.AddTriple(accessor.GetRegisterOperand(i));
+ assignments.AddList(accessor.GetRegisterOperand(i), 3);
break;
}
default:
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 63dfa3107c..68433a4155 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -24,8 +24,7 @@ class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
void Add(interpreter::Register r);
- void AddPair(interpreter::Register r);
- void AddTriple(interpreter::Register r);
+ void AddList(interpreter::Register r, uint32_t count);
void AddAll();
void Union(const BytecodeLoopAssignments& other);
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 5bb9a8e976..e1700e6b43 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -57,7 +57,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
- bool owner_has_exception,
const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
@@ -406,7 +405,7 @@ Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
- bool owner_has_exception, const BytecodeLivenessState* liveness) {
+ const BytecodeLivenessState* liveness) {
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
// to match the parameter count.
@@ -522,7 +521,7 @@ VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
return VectorSlotPair(feedback_vector(), slot);
}
-bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+void BytecodeGraphBuilder::CreateGraph(bool stack_check) {
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
@@ -544,8 +543,6 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
Node** const inputs = &exit_controls_.front();
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
-
- return true;
}
void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
@@ -564,7 +561,7 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
bytecode_iterator().current_offset());
Node* frame_state_before = environment()->Checkpoint(
- bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
+ bailout_id, OutputFrameStateCombine::Ignore(), liveness_before);
NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
#ifdef DEBUG
} else {
@@ -592,14 +589,13 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
- bool has_exception = NodeProperties::IsExceptionalCall(node);
const BytecodeLivenessState* liveness_after =
bytecode_analysis()->GetOutLivenessFor(
bytecode_iterator().current_offset());
- Node* frame_state_after = environment()->Checkpoint(
- bailout_id, combine, has_exception, liveness_after);
+ Node* frame_state_after =
+ environment()->Checkpoint(bailout_id, combine, liveness_after);
NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
@@ -1006,26 +1002,30 @@ void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
BuildLdaLookupGlobalSlot(TypeofMode::INSIDE_TYPEOF);
}
-void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name =
jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
+ LanguageMode language_mode = static_cast<LanguageMode>(
+ interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
+ bytecode_flags));
+ LookupHoistingMode lookup_hoisting_mode = static_cast<LookupHoistingMode>(
+ interpreter::StoreLookupSlotFlags::LookupHoistingModeBit::decode(
+ bytecode_flags));
+ DCHECK_IMPLIES(lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy,
+ is_sloppy(language_mode));
const Operator* op = javascript()->CallRuntime(
- is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
+ is_strict(language_mode)
+ ? Runtime::kStoreLookupSlot_Strict
+ : lookup_hoisting_mode == LookupHoistingMode::kLegacySloppy
+ ? Runtime::kStoreLookupSlot_SloppyHoisting
+ : Runtime::kStoreLookupSlot_Sloppy);
Node* store = NewNode(op, name, value);
environment()->BindAccumulator(store, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
- BuildStaLookupSlot(LanguageMode::SLOPPY);
-}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
- BuildStaLookupSlot(LanguageMode::STRICT);
-}
-
void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
@@ -1357,8 +1357,7 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
return ProcessCallArguments(call_op, call_args, 2 + arg_count);
}
-void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode,
+void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
Node* const* args, size_t arg_count,
int slot_id) {
DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
@@ -1372,14 +1371,20 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->Call(arg_count, frequency, feedback,
- receiver_mode, tail_call_mode);
- Node* value = ProcessCallArguments(call, args, static_cast<int>(arg_count));
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
+ const Operator* op =
+ javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedCall(
+ op, args, static_cast<int>(arg_count), feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = ProcessCallArguments(op, args, static_cast<int>(arg_count));
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildCallVarArgs(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode) {
+void BytecodeGraphBuilder::BuildCallVarArgs(ConvertReceiverMode receiver_mode) {
DCHECK_EQ(interpreter::Bytecodes::GetReceiverMode(
bytecode_iterator().current_bytecode()),
receiver_mode);
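
BuildCall now tries a speculative, feedback-driven lowering first and only falls back to the generic JSCall operator when that fails; the same try-simplified-else-generic shape is used for construct sites later in this file. A condensed sketch of the pattern, with names as in the hunk above:

  Node* node = nullptr;
  if (Node* simplified = TryBuildSimplifiedCall(
          op, args, static_cast<int>(arg_count), feedback.slot())) {
    // Speculation may have terminated the current block.
    if (environment() == nullptr) return;
    node = simplified;
  } else {
    node = ProcessCallArguments(op, args, static_cast<int>(arg_count));
  }
  environment()->BindAccumulator(node, Environment::kAttachFrameState);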
@@ -1410,17 +1415,16 @@ void BytecodeGraphBuilder::BuildCallVarArgs(TailCallMode tail_call_mode,
Node* const* call_args =
GetCallArgumentsFromRegister(callee, receiver_node, first_arg, arg_count);
- BuildCall(tail_call_mode, receiver_mode, call_args,
- static_cast<size_t>(2 + arg_count), slot_id);
+ BuildCall(receiver_mode, call_args, static_cast<size_t>(2 + arg_count),
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallAnyReceiver() {
- BuildCallVarArgs(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
+ BuildCallVarArgs(ConvertReceiverMode::kAny);
}
void BytecodeGraphBuilder::VisitCallProperty() {
- BuildCallVarArgs(TailCallMode::kDisallow,
- ConvertReceiverMode::kNotNullOrUndefined);
+ BuildCallVarArgs(ConvertReceiverMode::kNotNullOrUndefined);
}
void BytecodeGraphBuilder::VisitCallProperty0() {
@@ -1429,8 +1433,8 @@ void BytecodeGraphBuilder::VisitCallProperty0() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
int const slot_id = bytecode_iterator().GetIndexOperand(2);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
- {callee, receiver}, slot_id);
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, {callee, receiver},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallProperty1() {
@@ -1441,8 +1445,8 @@ void BytecodeGraphBuilder::VisitCallProperty1() {
Node* arg0 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
int const slot_id = bytecode_iterator().GetIndexOperand(3);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
- {callee, receiver, arg0}, slot_id);
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined, {callee, receiver, arg0},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallProperty2() {
@@ -1455,13 +1459,12 @@ void BytecodeGraphBuilder::VisitCallProperty2() {
Node* arg1 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(3));
int const slot_id = bytecode_iterator().GetIndexOperand(4);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined,
+ BuildCall(ConvertReceiverMode::kNotNullOrUndefined,
{callee, receiver, arg0, arg1}, slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver() {
- BuildCallVarArgs(TailCallMode::kDisallow,
- ConvertReceiverMode::kNullOrUndefined);
+ BuildCallVarArgs(ConvertReceiverMode::kNullOrUndefined);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver0() {
@@ -1469,8 +1472,7 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver0() {
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* receiver = jsgraph()->UndefinedConstant();
int const slot_id = bytecode_iterator().GetIndexOperand(1);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
- {callee, receiver}, slot_id);
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, {callee, receiver}, slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver1() {
@@ -1480,8 +1482,8 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver1() {
Node* arg0 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
int const slot_id = bytecode_iterator().GetIndexOperand(2);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
- {callee, receiver, arg0}, slot_id);
+ BuildCall(ConvertReceiverMode::kNullOrUndefined, {callee, receiver, arg0},
+ slot_id);
}
void BytecodeGraphBuilder::VisitCallUndefinedReceiver2() {
@@ -1493,7 +1495,7 @@ void BytecodeGraphBuilder::VisitCallUndefinedReceiver2() {
Node* arg1 =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(2));
int const slot_id = bytecode_iterator().GetIndexOperand(3);
- BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNullOrUndefined,
+ BuildCall(ConvertReceiverMode::kNullOrUndefined,
{callee, receiver, arg0, arg1}, slot_id);
}
@@ -1510,14 +1512,6 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitTailCall() {
- TailCallMode tail_call_mode =
- bytecode_array_->GetIsolate()->is_tail_call_elimination_enabled()
- ? TailCallMode::kAllow
- : TailCallMode::kDisallow;
- BuildCallVarArgs(tail_call_mode, ConvertReceiverMode::kAny);
-}
-
void BytecodeGraphBuilder::VisitCallJSRuntime() {
PrepareEagerCheckpoint();
Node* callee =
@@ -1574,28 +1568,63 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Environment::kAttachFrameState);
}
-Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
- const Operator* op, Node* callee, Node* new_target,
- interpreter::Register receiver, size_t reg_count) {
- int arg_count = static_cast<int>(reg_count);
+Node* const* BytecodeGraphBuilder::GetConstructArgumentsFromRegister(
+ Node* target, Node* new_target, interpreter::Register first_arg,
+ int arg_count) {
// arity is args + callee and new target.
int arity = arg_count + 2;
Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
- all[0] = callee;
- int first_arg_index = receiver.index();
+ all[0] = target;
+ int first_arg_index = first_arg.index();
for (int i = 0; i < arg_count; ++i) {
all[1 + i] = environment()->LookupRegister(
interpreter::Register(first_arg_index + i));
}
all[arity - 1] = new_target;
- Node* value = MakeNode(op, arity, all, false);
- return value;
+ return all;
+}
+
+Node* BytecodeGraphBuilder::ProcessConstructArguments(const Operator* op,
+ Node* const* args,
+ int arg_count) {
+ return MakeNode(op, arg_count, args, false);
+}
+
+void BytecodeGraphBuilder::VisitConstruct() {
+ PrepareEagerCheckpoint();
+ interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  // Slot index of 0 is used to indicate no feedback slot is available. Assert
+ // the assumption that slot index 0 is never a valid feedback slot.
+ STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
+ int const slot_id = bytecode_iterator().GetIndexOperand(3);
+ VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
+
+ Node* new_target = environment()->LookupAccumulator();
+ Node* callee = environment()->LookupRegister(callee_reg);
+
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
+ const Operator* op = javascript()->Construct(
+ static_cast<uint32_t>(reg_count + 2), frequency, feedback);
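+  // The operator arity of reg_count + 2 covers the argument registers plus
+  // the target and new_target inputs.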
+ int arg_count = static_cast<int>(reg_count);
+ Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
+ first_reg, arg_count);
+ Node* node = nullptr;
+ if (Node* simplified = TryBuildSimplifiedConstruct(
+ op, args, static_cast<int>(arg_count), feedback.slot())) {
+ if (environment() == nullptr) return;
+ node = simplified;
+ } else {
+ node = ProcessConstructArguments(op, args, 2 + arg_count);
+ }
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitConstructWithSpread() {
PrepareEagerCheckpoint();
interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
Node* new_target = environment()->LookupAccumulator();
@@ -1603,8 +1632,10 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
const Operator* op =
javascript()->ConstructWithSpread(static_cast<uint32_t>(reg_count + 2));
- Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
- receiver, reg_count);
+ int arg_count = static_cast<int>(reg_count);
+ Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
+ first_reg, arg_count);
+ Node* value = ProcessConstructArguments(op, args, 2 + arg_count);
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -1621,46 +1652,6 @@ void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
-Node* BytecodeGraphBuilder::ProcessConstructArguments(
- const Operator* call_new_op, Node* callee, Node* new_target,
- interpreter::Register receiver, size_t reg_count) {
- int arg_count = static_cast<int>(reg_count);
- // arity is args + callee and new target.
- int arity = arg_count + 2;
- Node** all = local_zone()->NewArray<Node*>(static_cast<size_t>(arity));
- all[0] = callee;
- int first_arg_index = receiver.index();
- for (int i = 0; i < arg_count; ++i) {
- all[1 + i] = environment()->LookupRegister(
- interpreter::Register(first_arg_index + i));
- }
- all[arity - 1] = new_target;
- Node* value = MakeNode(call_new_op, arity, all, false);
- return value;
-}
-
-void BytecodeGraphBuilder::VisitConstruct() {
- PrepareEagerCheckpoint();
- interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
- interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
- size_t reg_count = bytecode_iterator().GetRegisterCountOperand(2);
- // Slot index of 0 is used indicate no feedback slot is available. Assert
- // the assumption that slot index 0 is never a valid feedback slot.
- STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
- int const slot_id = bytecode_iterator().GetIndexOperand(3);
- VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
-
- Node* new_target = environment()->LookupAccumulator();
- Node* callee = environment()->LookupRegister(callee_reg);
-
- CallFrequency frequency = ComputeCallFrequency(slot_id);
- const Operator* call = javascript()->Construct(
- static_cast<uint32_t>(reg_count + 2), frequency, feedback);
- Node* value =
- ProcessConstructArguments(call, callee, new_target, receiver, reg_count);
- environment()->BindAccumulator(value, Environment::kAttachFrameState);
-}
-
void BytecodeGraphBuilder::VisitThrow() {
BuildLoopExitsForFunctionExit();
Node* value = environment()->LookupAccumulator();
@@ -1678,6 +1669,58 @@ void BytecodeGraphBuilder::VisitReThrow() {
MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::BuildHoleCheckAndThrow(
+ Node* condition, Runtime::FunctionId runtime_id, Node* name) {
+ Node* accumulator = environment()->LookupAccumulator();
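+  // Remember the accumulator value so the non-throwing path can re-bind it
+  // below.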
+ NewBranch(condition, BranchHint::kFalse);
+ {
+ SubEnvironment sub_environment(this);
+
+ NewIfTrue();
+ Node* node;
+ const Operator* op = javascript()->CallRuntime(runtime_id);
+ if (runtime_id == Runtime::kThrowReferenceError) {
+ DCHECK(name != nullptr);
+ node = NewNode(op, name);
+ } else {
+ DCHECK(runtime_id == Runtime::kThrowSuperAlreadyCalledError ||
+ runtime_id == Runtime::kThrowSuperNotCalled);
+ node = NewNode(op);
+ }
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+ Node* control = NewNode(common()->Throw());
+ MergeControlToLeaveFunction(control);
+ }
+ NewIfFalse();
+ environment()->BindAccumulator(accumulator);
+}
+
+void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ Node* name =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowReferenceError, name);
+}
+
+void BytecodeGraphBuilder::VisitThrowSuperNotCalledIfHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ BuildHoleCheckAndThrow(check_for_hole, Runtime::kThrowSuperNotCalled);
+}
+
+void BytecodeGraphBuilder::VisitThrowSuperAlreadyCalledIfNotHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ Node* check_for_not_hole =
+ NewNode(simplified()->BooleanNot(), check_for_hole);
+ BuildHoleCheckAndThrow(check_for_not_hole,
+ Runtime::kThrowSuperAlreadyCalledError);
+}
+
void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
PrepareEagerCheckpoint();
Node* left =
@@ -2091,6 +2134,45 @@ void BytecodeGraphBuilder::VisitToNumber() {
Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitToPrimitiveToString() {
+ PrepareEagerCheckpoint();
+ Node* object = environment()->LookupAccumulator();
+
+ Node* node = nullptr;
+ FeedbackSlot slot =
+ feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
+ if (Node* simplified = TryBuildSimplifiedToPrimitiveToString(object, slot)) {
+ node = simplified;
+ } else {
+ node = NewNode(javascript()->ToPrimitiveToString(), object);
+ }
+
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+ Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitStringConcat() {
+ PrepareEagerCheckpoint();
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(0);
+ int operand_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(1));
+ Node** operands =
+ local_zone()->NewArray<Node*>(static_cast<size_t>(operand_count));
+ int operand_base = first_reg.index();
+ for (int i = 0; i < operand_count; ++i) {
+ Node* reg =
+ environment()->LookupRegister(interpreter::Register(operand_base + i));
+ // Explicitly insert a string check here. All operands are already strings,
+ // however in the case of generator yields in the middle of string
+ // concatenations we might lose the knowledge that the operand is a string.
+ operands[i] = NewNode(simplified()->CheckString(), reg);
+ }
+
+ Node* node = MakeNode(javascript()->StringConcat(operand_count),
+ operand_count, operands, false);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
@@ -2119,12 +2201,6 @@ void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
BuildJumpIfToBooleanFalse();
}
-void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
-
-void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
- BuildJumpIfNotHole();
-}
-
void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
@@ -2218,6 +2294,18 @@ void BytecodeGraphBuilder::VisitDebugger() {
DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
#undef DEBUG_BREAK
+void BytecodeGraphBuilder::VisitIncBlockCounter() {
+ DCHECK(FLAG_block_coverage);
+
+ Node* closure = GetFunctionClosure();
+ Node* coverage_array_slot =
+ jsgraph()->Constant(bytecode_iterator().GetIndexOperand(0));
+
+ const Operator* op = javascript()->CallRuntime(Runtime::kIncBlockCounter);
+
+ NewNode(op, closure, coverage_array_slot);
+}
+
void BytecodeGraphBuilder::VisitForInPrepare() {
PrepareEagerCheckpoint();
Node* receiver =
@@ -2271,15 +2359,18 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
Node* state = environment()->LookupAccumulator();
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
- SuspendFlags flags = interpreter::SuspendGeneratorBytecodeFlags::Decode(
- bytecode_iterator().GetFlagOperand(1));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ // We assume we are storing a range starting from index 0.
+ CHECK_EQ(0, first_reg.index());
+ int register_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+
// The offsets used by the bytecode iterator are relative to a different base
// than what is used in the interpreter, hence the addition.
Node* offset =
jsgraph()->Constant(bytecode_iterator().current_offset() +
(BytecodeArray::kHeaderSize - kHeapObjectTag));
- int register_count = environment()->register_count();
int value_input_count = 3 + register_count;
Node** value_inputs = local_zone()->NewArray<Node*>(value_input_count);
@@ -2291,25 +2382,35 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
environment()->LookupRegister(interpreter::Register(i));
}
- MakeNode(javascript()->GeneratorStore(register_count, flags),
- value_input_count, value_inputs, false);
+ MakeNode(javascript()->GeneratorStore(register_count), value_input_count,
+ value_inputs, false);
}
-void BytecodeGraphBuilder::VisitResumeGenerator() {
+void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
+ Node* state =
+ NewNode(javascript()->GeneratorRestoreContinuation(), generator);
+
+ environment()->BindAccumulator(state, Environment::kAttachFrameState);
+}
+
+void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
+ Node* generator =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+  // We assume we are restoring registers starting from index 0.
+ CHECK_EQ(0, first_reg.index());
+ int register_count =
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
- for (int i = 0; i < environment()->register_count(); ++i) {
+ for (int i = 0; i < register_count; ++i) {
Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
}
-
- Node* state =
- NewNode(javascript()->GeneratorRestoreContinuation(), generator);
-
- environment()->BindAccumulator(state);
}
void BytecodeGraphBuilder::VisitWide() {
@@ -2327,8 +2428,6 @@ void BytecodeGraphBuilder::VisitIllegal() {
UNREACHABLE();
}
-void BytecodeGraphBuilder::VisitNop() {}
-
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
auto it = merge_environments_.find(current_offset);
if (it != merge_environments_.end()) {
@@ -2539,6 +2638,58 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
return nullptr;
}
+Node* BytecodeGraphBuilder::TryBuildSimplifiedToPrimitiveToString(
+ Node* value, FeedbackSlot slot) {
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction =
+ type_hint_lowering().ReduceToPrimitiveToStringOperation(value, effect,
+ control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedCall(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a CallIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceCallOperation(
+ op, args, arg_count, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
+Node* BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
+ Node* const* args,
+ int arg_count,
+ FeedbackSlot slot) {
+ // TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
+ // pruned from the graph by a soft-deopt. It can happen that a CallIC that
+ // control-dominates the OSR entry is still in "uninitialized" state.
+ if (!osr_ast_id_.IsNone()) return nullptr;
+ Node* effect = environment()->GetEffectDependency();
+ Node* control = environment()->GetControlDependency();
+ Reduction early_reduction = type_hint_lowering().ReduceConstructOperation(
+ op, args, arg_count, effect, control, slot);
+ if (early_reduction.Changed()) {
+ ApplyEarlyReduction(early_reduction);
+ return early_reduction.replacement();
+ }
+ return nullptr;
+}
+
Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
Node* receiver,
FeedbackSlot slot) {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index b963c6a197..52d84b0ddc 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -8,7 +8,6 @@
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-type-hint-lowering.h"
-#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
@@ -35,7 +34,7 @@ class BytecodeGraphBuilder {
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags);
// Creates a graph by visiting bytecodes.
- bool CreateGraph(bool stack_check = true);
+ void CreateGraph(bool stack_check = true);
private:
class Environment;
@@ -125,14 +124,11 @@ class BytecodeGraphBuilder {
int arg_count);
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
interpreter::Register receiver, size_t reg_count);
- Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
- Node* new_target,
- interpreter::Register receiver,
- size_t reg_count);
- Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
- Node* new_target,
- interpreter::Register receiver,
- size_t reg_count);
+ Node* const* GetConstructArgumentsFromRegister(
+ Node* target, Node* new_target, interpreter::Register first_arg,
+ int arg_count);
+ Node* ProcessConstructArguments(const Operator* op, Node* const* args,
+ int arg_count);
Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
interpreter::Register receiver,
size_t reg_count);
@@ -163,15 +159,12 @@ class BytecodeGraphBuilder {
void BuildLdaLookupSlot(TypeofMode typeof_mode);
void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
void BuildLdaLookupGlobalSlot(TypeofMode typeof_mode);
- void BuildStaLookupSlot(LanguageMode language_mode);
- void BuildCallVarArgs(TailCallMode tail_call_mode,
- ConvertReceiverMode receiver_mode);
- void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
- Node* const* args, size_t arg_count, int slot_id);
- void BuildCall(TailCallMode tail_call_mode, ConvertReceiverMode receiver_mode,
+ void BuildCallVarArgs(ConvertReceiverMode receiver_mode);
+ void BuildCall(ConvertReceiverMode receiver_mode, Node* const* args,
+ size_t arg_count, int slot_id);
+ void BuildCall(ConvertReceiverMode receiver_mode,
std::initializer_list<Node*> args, int slot_id) {
- BuildCall(tail_call_mode, receiver_mode, args.begin(), args.size(),
- slot_id);
+ BuildCall(receiver_mode, args.begin(), args.size(), slot_id);
}
void BuildBinaryOp(const Operator* op);
void BuildBinaryOpWithImmediate(const Operator* op);
@@ -179,6 +172,8 @@ class BytecodeGraphBuilder {
void BuildTestingOp(const Operator* op);
void BuildDelete(LanguageMode language_mode);
void BuildCastOperator(const Operator* op);
+ void BuildHoleCheckAndThrow(Node* condition, Runtime::FunctionId runtime_id,
+ Node* name = nullptr);
// Optional early lowering to the simplified operator level. Returns the node
// representing the lowered operation or {nullptr} if no lowering available.
@@ -187,6 +182,11 @@ class BytecodeGraphBuilder {
Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
FeedbackSlot slot);
Node* TryBuildSimplifiedToNumber(Node* input, FeedbackSlot slot);
+ Node* TryBuildSimplifiedToPrimitiveToString(Node* input, FeedbackSlot slot);
+ Node* TryBuildSimplifiedCall(const Operator* op, Node* const* args,
+ int arg_count, FeedbackSlot slot);
+ Node* TryBuildSimplifiedConstruct(const Operator* op, Node* const* args,
+ int arg_count, FeedbackSlot slot);
Node* TryBuildSimplifiedLoadNamed(const Operator* op, Node* receiver,
FeedbackSlot slot);
Node* TryBuildSimplifiedLoadKeyed(const Operator* op, Node* receiver,
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index d8fc12624d..16a7ce8908 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -50,12 +50,6 @@ LinkageLocation regloc(Register reg, MachineType type) {
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif
-#elif V8_TARGET_ARCH_X87
-// ===========================================================================
-// == x87 ====================================================================
-// ===========================================================================
-#define CALLEE_SAVE_REGISTERS esi.bit() | edi.bit() | ebx.bit()
-
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
@@ -161,7 +155,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
msig->parameter_count());
// Check the types of the signature.
// Currently no floating point parameters or returns are allowed because
- // on x87 and ia32, the FP top of stack is involved.
+ // on ia32, the FP top of stack is involved.
for (size_t i = 0; i < msig->return_count(); i++) {
MachineRepresentation rep = msig->GetReturn(i).representation();
CHECK_NE(MachineRepresentation::kFloat32, rep);
diff --git a/deps/v8/src/compiler/check-elimination.cc b/deps/v8/src/compiler/check-elimination.cc
new file mode 100644
index 0000000000..7e7fdd57b5
--- /dev/null
+++ b/deps/v8/src/compiler/check-elimination.cc
@@ -0,0 +1,76 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/check-elimination.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CheckElimination::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kCheckHeapObject:
+ return ReduceCheckHeapObject(node);
+ case IrOpcode::kCheckString:
+ return ReduceCheckString(node);
+ case IrOpcode::kCheckSeqString:
+ return ReduceCheckSeqString(node);
+ case IrOpcode::kCheckNonEmptyString:
+ return ReduceCheckNonEmptyString(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckHeapObject(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsString()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckSeqString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsSeqString()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction CheckElimination::ReduceCheckNonEmptyString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ HeapObjectMatcher m(input);
+ if (m.HasValue() && m.Value()->IsString() &&
+ node != jsgraph()->EmptyStringConstant()) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/check-elimination.h b/deps/v8/src/compiler/check-elimination.h
new file mode 100644
index 0000000000..2854def848
--- /dev/null
+++ b/deps/v8/src/compiler/check-elimination.h
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHECK_ELIMINATION_H_
+#define V8_COMPILER_CHECK_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSGraph;
+
+// Performs elimination of redundant checks within the graph due to inlined
+// constants.
+class V8_EXPORT_PRIVATE CheckElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ explicit CheckElimination(Editor* editor, JSGraph* jsgraph)
+ : AdvancedReducer(editor), jsgraph_(jsgraph) {}
+ ~CheckElimination() final {}
+
+ const char* reducer_name() const override { return "CheckElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceCheckHeapObject(Node* node);
+ Reduction ReduceCheckString(Node* node);
+ Reduction ReduceCheckSeqString(Node* node);
+ Reduction ReduceCheckNonEmptyString(Node* node);
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+
+ JSGraph* jsgraph_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CHECK_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/checkpoint-elimination.h b/deps/v8/src/compiler/checkpoint-elimination.h
index f30eec0f55..87f14c27a6 100644
--- a/deps/v8/src/compiler/checkpoint-elimination.h
+++ b/deps/v8/src/compiler/checkpoint-elimination.h
@@ -20,6 +20,8 @@ class V8_EXPORT_PRIVATE CheckpointElimination final
explicit CheckpointElimination(Editor* editor);
~CheckpointElimination() final {}
+ const char* reducer_name() const override { return "CheckpointElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 19bb76b125..a1cefa1123 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -221,7 +221,7 @@ Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler()->HeapConstant(object);
}
-Node* CodeAssembler::CStringConstant(const char* str) {
+Node* CodeAssembler::StringConstant(const char* str) {
return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
}
@@ -554,10 +554,16 @@ Node* CodeAssembler::Projection(int index, Node* value) {
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Variable* exception_var) {
+ DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
+
+ if (if_exception == nullptr) {
+    // If no handler is supplied, don't add continuations.
+ return;
+ }
+
Label success(this), exception(this, Label::kDeferred);
success.MergeVariables();
exception.MergeVariables();
- DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
raw_assembler()->Continuations(node, success.label_, exception.label_);
@@ -620,6 +626,22 @@ Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
+Node* CodeAssembler::TailCallRuntimeN(Runtime::FunctionId function,
+ Node* context, Node* argc) {
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 0, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+
+ Node* nodes[] = {centry, ref, argc, context};
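+  // No explicit arguments are collected here; the caller's stack arguments
+  // are reused and their count is passed via |argc|.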
+
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
+}
+
// Instantiate TailCallRuntime() for argument counts used by CSA-generated code
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
@@ -1037,13 +1059,25 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
for (auto var : variable_phis_) {
CodeAssemblerVariable::Impl* var_impl = var.first;
auto i = variable_merges_.find(var_impl);
- // If the following asserts fire, then a variable that has been marked as
- // being merged at the label--either by explicitly marking it so in the
- // label constructor or by having seen different bound values at branches
- // into the label--doesn't have a bound value along all of the paths that
- // have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end());
- DCHECK_EQ(i->second.size(), merge_count_);
+#if DEBUG
+ bool not_found = i == variable_merges_.end();
+ if (not_found || i->second.size() != merge_count_) {
+ std::stringstream str;
+      str << "A variable that has been marked as being merged at the label"
+ << "\n# doesn't have a bound value along all of the paths that "
+ << "\n# have been merged into the label up to this point."
+ << "\n#"
+ << "\n# This can happen in the following cases:"
+ << "\n# - By explicitly marking it so in the label constructor"
+ << "\n# - By having seen different bound values at branches"
+ << "\n#"
+ << "\n# Merge count: expected=" << merge_count_
+ << " vs. found=" << (not_found ? 0 : i->second.size())
+ << "\n# Variable: " << *var_impl
+ << "\n# Current Block: " << *label_->block();
+ FATAL(str.str().c_str());
+ }
+#endif // DEBUG
Node* phi = state_->raw_assembler_->Phi(
var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
variable_phis_[var_impl] = phi;
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 1f2e4d8f4f..039668ebcf 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -225,7 +225,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* SmiConstant(Smi* value);
Node* SmiConstant(int value);
Node* HeapConstant(Handle<HeapObject> object);
- Node* CStringConstant(const char* str);
+ Node* StringConstant(const char* str);
Node* BooleanConstant(bool value);
Node* ExternalConstant(ExternalReference address);
Node* Float64Constant(double value);
@@ -351,6 +351,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
TArgs... args);
+ // Tail call into the runtime passing the same |argc| stack arguments that we
+ // were called with.
+ Node* TailCallRuntimeN(Runtime::FunctionId function, Node* context,
+ Node* argc);
+
template <class... TArgs>
Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
Node* target = HeapConstant(callable.code());
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 7f09b8524e..c6d3174d8c 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -87,8 +87,8 @@ class InstructionOperandConverter {
return ToExternalReference(instr_->InputAt(index));
}
- Handle<HeapObject> InputHeapObject(size_t index) {
- return ToHeapObject(instr_->InputAt(index));
+ Handle<Code> InputCode(size_t index) {
+ return ToCode(instr_->InputAt(index));
}
Label* InputLabel(size_t index) { return ToLabel(instr_->InputAt(index)); }
@@ -151,7 +151,9 @@ class InstructionOperandConverter {
ConstantOperand::cast(op)->virtual_register());
}
- double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ double ToDouble(InstructionOperand* op) {
+ return ToConstant(op).ToFloat64().value();
+ }
float ToFloat32(InstructionOperand* op) { return ToConstant(op).ToFloat32(); }
@@ -159,8 +161,8 @@ class InstructionOperandConverter {
return ToConstant(op).ToExternalReference();
}
- Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
- return ToConstant(op).ToHeapObject();
+ Handle<Code> ToCode(InstructionOperand* op) {
+ return ToConstant(op).ToCode();
}
const Frame* frame() const { return gen_->frame(); }
@@ -202,15 +204,14 @@ class OutOfLineCode : public ZoneObject {
Label* entry() { return &entry_; }
Label* exit() { return &exit_; }
const Frame* frame() const { return frame_; }
- Isolate* isolate() const { return masm()->isolate(); }
- MacroAssembler* masm() const { return masm_; }
+ TurboAssembler* tasm() { return tasm_; }
OutOfLineCode* next() const { return next_; }
private:
Label entry_;
Label exit_;
const Frame* const frame_;
- MacroAssembler* const masm_;
+ TurboAssembler* const tasm_;
OutOfLineCode* const next_;
};
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 66232aa06f..f09cd73a15 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -35,44 +35,50 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info)
- : frame_access_state_(nullptr),
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position)
+ : zone_(codegen_zone),
+ frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
unwinding_info_writer_(zone()),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
current_block_(RpoNumber::Invalid()),
+ start_source_position_(start_source_position),
current_source_position_(SourcePosition::Unknown()),
- masm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
+ tasm_(info->isolate(), nullptr, 0, CodeObjectRequired::kNo),
resolver_(this),
- safepoints_(code->zone()),
- handlers_(code->zone()),
- deoptimization_exits_(code->zone()),
- deoptimization_states_(code->zone()),
- deoptimization_literals_(code->zone()),
+ safepoints_(zone()),
+ handlers_(zone()),
+ deoptimization_exits_(zone()),
+ deoptimization_states_(zone()),
+ deoptimization_literals_(zone()),
inlined_function_count_(0),
- translations_(code->zone()),
+ translations_(zone()),
last_lazy_deopt_pc_(0),
jump_tables_(nullptr),
ools_(nullptr),
+ osr_helper_(osr_helper),
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
- source_position_table_builder_(code->zone(),
+ source_position_table_builder_(zone(),
info->SourcePositionRecordingMode()),
result_(kSuccess) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
CreateFrameAccessState(frame);
+ CHECK_EQ(info->is_osr(), osr_helper_.has_value());
}
Isolate* CodeGenerator::isolate() const { return info_->isolate(); }
void CodeGenerator::CreateFrameAccessState(Frame* frame) {
FinishFrame(frame);
- frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
+ frame_access_state_ = new (zone()) FrameAccessState(frame);
}
void CodeGenerator::AssembleCode() {
@@ -81,19 +87,18 @@ void CodeGenerator::AssembleCode() {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in AssemblePrologue).
- FrameScope frame_scope(masm(), StackFrame::MANUAL);
+ FrameScope frame_scope(tasm(), StackFrame::MANUAL);
if (info->is_source_positions_enabled()) {
- SourcePosition source_position(info->shared_info()->start_position());
- AssembleSourcePosition(source_position);
+ AssembleSourcePosition(start_source_position());
}
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
- ProfileEntryHookStub::MaybeCallEntryHook(masm());
+ ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
}
// Architecture-specific, linkage-specific prologue.
- info->set_prologue_offset(masm()->pc_offset());
+ info->set_prologue_offset(tasm()->pc_offset());
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -107,16 +112,6 @@ void CodeGenerator::AssembleCode() {
}
inlined_function_count_ = deoptimization_literals_.size();
- // Define deoptimization literals for all unoptimized code objects of inlined
- // functions. This ensures unoptimized code is kept alive by optimized code.
- for (const CompilationInfo::InlinedFunctionHolder& inlined :
- info->inlined_functions()) {
- if (!inlined.shared_info.equals(info->shared_info())) {
- DefineDeoptimizationLiteral(
- DeoptimizationLiteral(inlined.inlined_code_object_root));
- }
- }
-
unwinding_info_writer_.SetNumberOfInstructionBlocks(
code()->InstructionBlockCount());
@@ -127,12 +122,12 @@ void CodeGenerator::AssembleCode() {
continue;
}
// Align loop headers on 16-byte boundaries.
- if (block->IsLoopHeader()) masm()->Align(16);
+ if (block->IsLoopHeader()) tasm()->Align(16);
// Ensure lazy deopt doesn't patch handler entry points.
if (block->IsHandler()) EnsureSpaceForLazyDeopt();
// Bind a label for a block.
current_block_ = block->rpo_number();
- unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
+ unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
Vector<char> buffer = Vector<char>::New(200);
@@ -158,12 +153,12 @@ void CodeGenerator::AssembleCode() {
buffer = buffer.SubVector(next, buffer.length());
}
SNPrintF(buffer, " --");
- masm()->RecordComment(buffer_start);
+ tasm()->RecordComment(buffer_start);
}
frame_access_state()->MarkHasFrame(block->needs_frame());
- masm()->bind(GetLabel(current_block_));
+ tasm()->bind(GetLabel(current_block_));
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -171,12 +166,12 @@ void CodeGenerator::AssembleCode() {
// using the roots.
// TODO(mtrofin): investigate how we can avoid doing this repeatedly.
if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
- masm()->InitializeRootRegister();
+ tasm()->InitializeRootRegister();
}
}
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
result_ = AssembleBlock(block);
} else {
result_ = AssembleBlock(block);
@@ -188,25 +183,29 @@ void CodeGenerator::AssembleCode() {
// Assemble all out-of-line code.
if (ools_) {
- masm()->RecordComment("-- Out of line code --");
+ tasm()->RecordComment("-- Out of line code --");
for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
- masm()->bind(ool->entry());
+ tasm()->bind(ool->entry());
ool->Generate();
- if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
+ if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
}
}
// Assemble all eager deoptimization exits.
for (DeoptimizationExit* exit : deoptimization_exits_) {
- masm()->bind(exit->label());
- AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
+ tasm()->bind(exit->label());
+ int trampoline_pc = tasm()->pc_offset();
+ int deoptimization_id = exit->deoptimization_id();
+ DeoptimizationState* ds = deoptimization_states_[deoptimization_id];
+ ds->set_trampoline_pc(trampoline_pc);
+ AssembleDeoptimizerCall(deoptimization_id, exit->pos());
}
// Ensure there is space for lazy deoptimization in the code.
if (info->ShouldEnsureSpaceForLazyDeopt()) {
- int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
- while (masm()->pc_offset() < target_offset) {
- masm()->nop();
+ int target_offset = tasm()->pc_offset() + Deoptimizer::patch_size();
+ while (tasm()->pc_offset() < target_offset) {
+ tasm()->nop();
}
}
@@ -214,9 +213,9 @@ void CodeGenerator::AssembleCode() {
// Emit the jump tables.
if (jump_tables_) {
- masm()->Align(kPointerSize);
+ tasm()->Align(kPointerSize);
for (JumpTable* table = jump_tables_; table; table = table->next()) {
- masm()->bind(table->label());
+ tasm()->bind(table->label());
AssembleJumpTable(table->targets(), table->target_count());
}
}
@@ -224,9 +223,9 @@ void CodeGenerator::AssembleCode() {
// The PerfJitLogger logs code up until here, excluding the safepoint
// table. Resolve the unwinding info now so it is aware of the same code size
// as reported by perf.
- unwinding_info_writer_.Finish(masm()->pc_offset());
+ unwinding_info_writer_.Finish(tasm()->pc_offset());
- safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
+ safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
result_ = kSuccess;
}
@@ -234,7 +233,7 @@ Handle<Code> CodeGenerator::FinalizeCode() {
if (result_ != kSuccess) return Handle<Code>();
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), unwinding_info_writer_.eh_frame_writer(), info(),
+ tasm(), unwinding_info_writer_.eh_frame_writer(), info(),
Handle<Object>());
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
@@ -280,7 +279,7 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
Safepoint::Kind kind, int arguments,
Safepoint::DeoptMode deopt_mode) {
Safepoint safepoint =
- safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+ safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
int stackSlotToSpillSlotDelta =
frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
for (const InstructionOperand& operand : references->reference_operands()) {
@@ -308,7 +307,7 @@ bool CodeGenerator::IsMaterializableFromRoot(
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
Heap* heap = isolate()->heap();
return heap->IsRootHandle(object, index_return) &&
- heap->RootCanBeTreatedAsConstant(*index_return);
+ !heap->RootCanBeWrittenAfterInitialization(*index_return);
}
return false;
}
@@ -470,7 +469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
branch.fallthru = true;
// Assemble architecture-specific branch.
AssembleArchBranch(instr, &branch);
- masm()->bind(&continue_label);
+ tasm()->bind(&continue_label);
break;
}
case kFlags_set: {
@@ -500,20 +499,24 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
- source_position_table_builder_.AddPosition(masm()->pc_offset(),
+ source_position_table_builder_.AddPosition(tasm()->pc_offset(),
source_position, false);
if (FLAG_code_comments) {
CompilationInfo* info = this->info();
if (!info->parse_info()) return;
std::ostringstream buffer;
buffer << "-- ";
- if (FLAG_trace_turbo) {
+ if (FLAG_trace_turbo ||
+ tasm()->isolate()->concurrent_recompilation_enabled()) {
buffer << source_position;
} else {
+ AllowHeapAllocation allocation;
+ AllowHandleAllocation handles;
+ AllowHandleDereference deref;
buffer << source_position.InliningStack(info);
}
buffer << " --";
- masm()->RecordComment(StrDup(buffer.str().c_str()));
+ tasm()->RecordComment(StrDup(buffer.str().c_str()));
}
}
@@ -593,22 +596,23 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
if (info->is_osr()) {
DCHECK(osr_pc_offset_ >= 0);
- data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+ data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
} else {
BailoutId osr_ast_id = BailoutId::None();
- data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+ data->SetOsrBytecodeOffset(Smi::FromInt(osr_ast_id.ToInt()));
data->SetOsrPcOffset(Smi::FromInt(-1));
}
// Populate deoptimization entries.
for (int i = 0; i < deopt_count; i++) {
DeoptimizationState* deoptimization_state = deoptimization_states_[i];
- data->SetAstId(i, deoptimization_state->bailout_id());
- CHECK(deoptimization_states_[i]);
+ data->SetBytecodeOffset(i, deoptimization_state->bailout_id());
+ CHECK(deoptimization_state);
data->SetTranslationIndex(
- i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
- data->SetArgumentsStackHeight(i, Smi::kZero);
+ i, Smi::FromInt(deoptimization_state->translation_id()));
+ data->SetTrampolinePc(i,
+ Smi::FromInt(deoptimization_state->trampoline_pc()));
data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
}
@@ -634,7 +638,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (flags & CallDescriptor::kHasExceptionHandler) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
- handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
+ handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
}
if (needs_frame_state) {
@@ -644,19 +648,14 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
- int pc_offset = masm()->pc_offset();
+ int pc_offset = tasm()->pc_offset();
int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
descriptor->state_combine());
- // If the pre-call frame state differs from the post-call one, produce the
- // pre-call frame state, too.
- // TODO(jarin) We might want to avoid building the pre-call frame state
- // because it is only used to get locals and arguments (by the debugger and
- // f.arguments), and those are the same in the pre-call and post-call
- // states.
- if (!descriptor->state_combine().IsOutputIgnored()) {
- deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
- OutputFrameStateCombine::Ignore());
- }
+
+ DeoptimizationExit* const exit = new (zone())
+ DeoptimizationExit(deopt_state_id, current_source_position_);
+ deoptimization_exits_.push_back(exit);
+
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
}
}
@@ -743,12 +742,11 @@ void CodeGenerator::TranslateFrameStateDescriptorOperands(
for (StateValueList::iterator it = values->begin(); it != values->end();
++it, ++index) {
StateValueDescriptor* value_desc = (*it).desc;
- if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+ if (!combine.IsOutputIgnored()) {
// The result of the call should be placed at position
// [index_from_top] in the stack (overwriting whatever was
// previously there).
- size_t index_from_top =
- desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ size_t index_from_top = desc->GetSize() - 1 - combine.GetOffsetToPokeAt();
if (index >= index_from_top &&
index < index_from_top + iter->instruction()->OutputCount()) {
DCHECK_NOT_NULL(translation);
@@ -763,17 +761,7 @@ void CodeGenerator::TranslateFrameStateDescriptorOperands(
}
TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
}
- DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
-
- if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
- DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
- for (size_t output = 0; output < combine.GetPushCount(); output++) {
- // Materialize the result of the call instruction in this slot.
- AddTranslationForOperand(translation, iter->instruction(),
- iter->instruction()->OutputAt(output),
- MachineType::AnyTagged());
- }
- }
+ DCHECK_EQ(desc->GetSize(), index);
}
@@ -798,12 +786,6 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
switch (descriptor->type()) {
- case FrameStateType::kJavaScriptFunction:
- translation->BeginJSFrame(
- descriptor->bailout_id(), shared_info_id,
- static_cast<unsigned int>(descriptor->GetSize(state_combine) -
- (1 + descriptor->parameters_count())));
- break;
case FrameStateType::kInterpretedFunction:
translation->BeginInterpretedFrame(
descriptor->bailout_id(), shared_info_id,
@@ -814,15 +796,28 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
- case FrameStateType::kTailCallerFunction:
- translation->BeginTailCallerFrame(shared_info_id);
- break;
case FrameStateType::kConstructStub:
DCHECK(descriptor->bailout_id().IsValidForConstructStub());
translation->BeginConstructStubFrame(
descriptor->bailout_id(), shared_info_id,
static_cast<unsigned int>(descriptor->parameters_count()));
break;
+ case FrameStateType::kBuiltinContinuation: {
+ BailoutId bailout_id = descriptor->bailout_id();
+ int parameter_count =
+ static_cast<unsigned int>(descriptor->parameters_count());
+ translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
+ parameter_count);
+ break;
+ }
+ case FrameStateType::kJavaScriptBuiltinContinuation: {
+ BailoutId bailout_id = descriptor->bailout_id();
+ int parameter_count =
+ static_cast<unsigned int>(descriptor->parameters_count());
+ translation->BeginJavaScriptBuiltinContinuationFrame(
+ bailout_id, shared_info_id, parameter_count);
+ break;
+ }
case FrameStateType::kGetterStub:
translation->BeginGetterStubFrame(shared_info_id);
break;
@@ -860,7 +855,6 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
return deoptimization_id;
}
-
void CodeGenerator::AddTranslationForOperand(Translation* translation,
Instruction* instr,
InstructionOperand* op,
@@ -968,7 +962,7 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kFloat64:
DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
type.representation() == MachineRepresentation::kTagged);
- literal = DeoptimizationLiteral(constant.ToFloat64());
+ literal = DeoptimizationLiteral(constant.ToFloat64().value());
break;
case Constant::kHeapObject:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
@@ -986,15 +980,15 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
}
}
-
void CodeGenerator::MarkLazyDeoptSite() {
- last_lazy_deopt_pc_ = masm()->pc_offset();
+ last_lazy_deopt_pc_ = tasm()->pc_offset();
}
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
Instruction* instr, size_t frame_state_offset) {
int const deoptimization_id = BuildTranslation(
instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());
+
DeoptimizationExit* const exit = new (zone())
DeoptimizationExit(deoptimization_id, current_source_position_);
deoptimization_exits_.push_back(exit);
@@ -1002,11 +996,10 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
}
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
- : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
+ : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
}
-
OutOfLineCode::~OutOfLineCode() {}
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 5d879a28a5..1d8a5a0983 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -5,8 +5,10 @@
#ifndef V8_COMPILER_CODE_GENERATOR_H_
#define V8_COMPILER_CODE_GENERATOR_H_
+#include "src/base/optional.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/osr.h"
#include "src/compiler/unwinding-info-writer.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
@@ -77,13 +79,15 @@ class DeoptimizationLiteral {
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* code, CompilationInfo* info,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
// FinalizeCode returns a null handle.
- void AssembleCode();
+ void AssembleCode(); // Does not need to run on main thread.
Handle<Code> FinalizeCode();
InstructionSequence* code() const { return code_; }
@@ -94,20 +98,25 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
- void AssembleSourcePosition(Instruction* instr);
+ SourcePosition start_source_position() const {
+ return start_source_position_;
+ }
+ void AssembleSourcePosition(Instruction* instr);
void AssembleSourcePosition(SourcePosition source_position);
// Record a safepoint with the given pointer map.
void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
int arguments, Safepoint::DeoptMode deopt_mode);
+ Zone* zone() const { return zone_; }
+
private:
- MacroAssembler* masm() { return &masm_; }
+ TurboAssembler* tasm() { return &tasm_; }
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }
- Zone* zone() const { return code()->zone(); }
CompilationInfo* info() const { return info_; }
+ OsrHelper* osr_helper() { return &(*osr_helper_); }
// Create the FrameAccessState object. The Frame is immutable from here on.
void CreateFrameAccessState(Frame* frame);
@@ -273,13 +282,16 @@ class CodeGenerator final : public GapResolver::Assembler {
translation_id_(translation_id),
pc_offset_(pc_offset),
kind_(kind),
- reason_(reason) {}
+ reason_(reason),
+ trampoline_pc_(-1) {}
BailoutId bailout_id() const { return bailout_id_; }
int translation_id() const { return translation_id_; }
int pc_offset() const { return pc_offset_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ int trampoline_pc() { return trampoline_pc_; }
+ void set_trampoline_pc(int t_pc) { trampoline_pc_ = t_pc; }
private:
BailoutId bailout_id_;
@@ -287,6 +299,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int pc_offset_;
DeoptimizeKind kind_;
DeoptimizeReason reason_;
+ int trampoline_pc_;
};
struct HandlerInfo {
@@ -296,6 +309,7 @@ class CodeGenerator final : public GapResolver::Assembler {
friend class OutOfLineCode;
+ Zone* zone_;
FrameAccessState* frame_access_state_;
Linkage* const linkage_;
InstructionSequence* const code_;
@@ -304,8 +318,9 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* const labels_;
Label return_label_;
RpoNumber current_block_;
+ SourcePosition start_source_position_;
SourcePosition current_source_position_;
- MacroAssembler masm_;
+ TurboAssembler tasm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
ZoneVector<HandlerInfo> handlers_;
@@ -317,6 +332,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int last_lazy_deopt_pc_;
JumpTable* jump_tables_;
OutOfLineCode* ools_;
+ base::Optional<OsrHelper> osr_helper_;
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index acc2092f5d..ea3575aa55 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -29,6 +29,8 @@ class V8_EXPORT_PRIVATE CommonOperatorReducer final
MachineOperatorBuilder* machine);
~CommonOperatorReducer() final {}
+ const char* reducer_name() const override { return "CommonOperatorReducer"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f87c0755b8..f24221d375 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -28,7 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
return os << "False";
}
UNREACHABLE();
- return os;
}
@@ -275,7 +274,6 @@ std::ostream& operator<<(std::ostream& os, RegionObservability observability) {
return os << "not-observable";
}
UNREACHABLE();
- return os;
}
RegionObservability RegionObservabilityOf(Operator const* op) {
@@ -802,7 +800,6 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
return &cache_.kBranchFalseOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
@@ -1161,7 +1158,6 @@ const Operator* CommonOperatorBuilder::BeginRegion(
return &cache_.kBeginRegionNotObservableOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* CommonOperatorBuilder::StateValues(int arguments,
@@ -1325,7 +1321,6 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
return Loop(size);
} else {
UNREACHABLE();
- return nullptr;
}
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 2b51a814fe..2fa1a479b4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -37,7 +37,6 @@ inline BranchHint NegateBranchHint(BranchHint hint) {
return BranchHint::kTrue;
}
UNREACHABLE();
- return hint;
}
inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index d66a9c58d5..10ec4eb042 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -35,7 +35,6 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
return ReduceNode(node);
}
UNREACHABLE();
- return NoChange();
}
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index 1cf9b22833..ede2daac25 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -28,6 +28,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* common);
~DeadCodeElimination() final {}
+ const char* reducer_name() const override { return "DeadCodeElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 6a75e8cff2..36a17fd547 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -536,15 +536,9 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
return;
}
- if (node->opcode() == IrOpcode::kIfSuccess) {
- // We always schedule IfSuccess with its call, so skip it here.
- DCHECK_EQ(IrOpcode::kCall, node->InputAt(0)->opcode());
- // The IfSuccess node should not belong to an exceptional call node
- // because such IfSuccess nodes should only start a basic block (and
- // basic block start nodes are not handled in the ProcessNode method).
- DCHECK(!NodeProperties::IsExceptionalCall(node->InputAt(0)));
- return;
- }
+ // The IfSuccess nodes should always start a basic block (and basic block
+ // start nodes are not handled in the ProcessNode method).
+ DCHECK_NE(IrOpcode::kIfSuccess, node->opcode());
// If the node takes an effect, replace with the current one.
if (node->op()->EffectInputCount() > 0) {
@@ -641,9 +635,18 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckReceiver:
result = LowerCheckReceiver(node, frame_state);
break;
+ case IrOpcode::kCheckSymbol:
+ result = LowerCheckSymbol(node, frame_state);
+ break;
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
+ case IrOpcode::kCheckSeqString:
+ result = LowerCheckSeqString(node, frame_state);
+ break;
+ case IrOpcode::kCheckNonEmptyString:
+ result = LowerCheckNonEmptyString(node, frame_state);
+ break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -763,6 +766,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringCharCodeAt:
result = LowerStringCharCodeAt(node);
break;
+ case IrOpcode::kSeqStringCharCodeAt:
+ result = LowerSeqStringCharCodeAt(node);
+ break;
+ case IrOpcode::kStringToLowerCaseIntl:
+ result = LowerStringToLowerCaseIntl(node);
+ break;
+ case IrOpcode::kStringToUpperCaseIntl:
+ result = LowerStringToUpperCaseIntl(node);
+ break;
case IrOpcode::kStringEqual:
result = LowerStringEqual(node);
break;
@@ -775,8 +787,8 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
- case IrOpcode::kCheckTaggedHole:
- result = LowerCheckTaggedHole(node, frame_state);
+ case IrOpcode::kCheckNotTaggedHole:
+ result = LowerCheckNotTaggedHole(node, frame_state);
break;
case IrOpcode::kConvertTaggedHoleToUndefined:
result = LowerConvertTaggedHoleToUndefined(node);
@@ -805,6 +817,14 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStoreTypedElement:
LowerStoreTypedElement(node);
break;
+ case IrOpcode::kLookupHashStorageIndex:
+ result = LowerLookupHashStorageIndex(node);
+ break;
+ case IrOpcode::kLoadHashMapValue:
+      result = LowerLoadHashMapValue(node);
+      break;
+ case IrOpcode::kTransitionAndStoreElement:
+ LowerTransitionAndStoreElement(node);
+ break;
case IrOpcode::kFloat64RoundUp:
if (!LowerFloat64RoundUp(node).To(&result)) {
return false;
@@ -1300,6 +1320,17 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+ Node* check =
+ __ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASymbol, check, frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1313,6 +1344,47 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
return value;
}
+Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ Node* is_string = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* is_sequential =
+ __ Word32Equal(__ Word32And(value_instance_type,
+ __ Int32Constant(kStringRepresentationMask)),
+ __ Int32Constant(kSeqStringTag));
+ Node* is_sequential_string = __ Word32And(is_string, is_sequential);
+
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType,
+ is_sequential_string, frame_state);
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerCheckNonEmptyString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+ Node* is_string = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ Node* is_non_empty = __ Word32Equal(
+ __ WordEqual(value, __ EmptyStringConstant()), __ Int32Constant(0));
+
+ Node* is_non_empty_string = __ Word32And(is_string, is_non_empty);
+
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, is_non_empty_string,
+ frame_state);
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
@@ -1767,6 +1839,7 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
Node* node, Node* frame_state) {
+ CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeLabel<1>();
@@ -1780,8 +1853,8 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
__ Bind(&if_not_smi);
- Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+ Node* number =
+ BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
number = __ TruncateFloat64ToWord32(number);
__ Goto(&done, number);
@@ -2063,7 +2136,7 @@ Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
Node* length = NodeProperties::GetValueInput(node, 1);
Callable const callable =
- CodeFactory::NewUnmappedArgumentsElements(isolate());
+ Builtins::CallableFor(isolate(), Builtins::kNewUnmappedArgumentsElements);
Operator::Properties const properties = node->op()->properties();
CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2089,7 +2162,8 @@ Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable = CodeFactory::StringCharAt(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCharAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2102,7 +2176,8 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCharCodeAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2112,6 +2187,33 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ auto one_byte_load = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ __ GotoIf(is_one_byte, &one_byte_load);
+ Node* two_byte_result = __ LoadElement(
+ AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
+ __ Goto(&done, two_byte_result);
+
+ __ Bind(&one_byte_load);
+ Node* one_byte_element = __ LoadElement(
+ AccessBuilder::ForSeqOneByteStringCharacter(), receiver, position);
+ __ Goto(&done, one_byte_element);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
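Editorial note: the one-byte/two-byte split above hinges on a single bit of the string's instance type. A minimal standalone sketch of that test follows; the mask and tag values are written in as assumptions for the sketch, not taken from this diff.

#include <cstdint>

// Assumed encoding for this sketch only: one bit of the instance type selects
// the string encoding, and one-byte strings have that bit set.
constexpr uint32_t kStringEncodingMaskSketch = 0x8;
constexpr uint32_t kOneByteStringTagSketch = 0x8;

bool IsOneByteString(uint32_t instance_type) {
  // Mirrors the Word32And/Word32Equal pair emitted in LowerSeqStringCharCodeAt.
  return (instance_type & kStringEncodingMaskSketch) == kOneByteStringTagSketch;
}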
Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
@@ -2161,6 +2263,46 @@ Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
return done.PhiAt(0);
}
+#ifdef V8_INTL_SUPPORT
+
+Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
+ Node* receiver = node->InputAt(0);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringToLowerCaseIntl);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kStringToUpperCaseIntl;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ return __ Call(desc, __ CEntryStubConstant(1), receiver,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+}
+
+#else
+
+Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
+
+#endif // V8_INTL_SUPPORT
+
Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
Node* value = node->InputAt(0);
Node* code = value;
@@ -2291,7 +2433,8 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
Node* search_string = node->InputAt(1);
Node* position = node->InputAt(2);
- Callable callable = CodeFactory::StringIndexOf(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringIndexOf);
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
@@ -2314,16 +2457,18 @@ Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
}
Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
- return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringEqual), node);
}
Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
- return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringLessThan), node);
}
Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
- return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
- node);
+ return LowerStringComparison(
+ Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node);
}
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
@@ -2338,8 +2483,9 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
return value;
}
-Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
- Node* frame_state) {
+
+Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = __ WordEqual(value, __ TheHoleConstant());
__ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
@@ -2484,7 +2630,8 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
__ Bind(&if_not_fixed_array);
// We need to take a copy of the {elements} and set them up for {object}.
Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCopyFastSmiOrObjectElements);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
@@ -2529,8 +2676,10 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
(flags & GrowFastElementsFlag::kDoubleElements)
- ? CodeFactory::GrowFastDoubleElements(isolate())
- : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+ ? Builtins::CallableFor(isolate(),
+ Builtins::kGrowFastDoubleElements)
+ : Builtins::CallableFor(isolate(),
+ Builtins::kGrowFastSmiOrObjectElements);
CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
@@ -2554,7 +2703,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
// Update the "length" property of the {object}.
- __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+ __ StoreField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), object,
object_length);
}
__ Goto(&done, done_grow.PhiAt(0));
@@ -2661,6 +2810,171 @@ void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
storage, index, value);
}
+void EffectControlLinearizer::TransitionElementsTo(Node* node, Node* array,
+ ElementsKind from,
+ ElementsKind to) {
+ DCHECK(IsMoreGeneralElementsKindTransition(from, to));
+ DCHECK(to == HOLEY_ELEMENTS || to == HOLEY_DOUBLE_ELEMENTS);
+
+ Handle<Map> target(to == HOLEY_ELEMENTS ? FastMapParameterOf(node->op())
+ : DoubleMapParameterOf(node->op()));
+ Node* target_map = __ HeapConstant(target);
+
+ if (IsSimpleMapChangeTransition(from, to)) {
+ __ StoreField(AccessBuilder::ForMap(), array, target_map);
+ } else {
+ // Instance migration, call out to the runtime for {array}.
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ __ Call(desc, __ CEntryStubConstant(1), array, target_map,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ }
+}
+
+Node* EffectControlLinearizer::IsElementsKindGreaterThan(
+ Node* kind, ElementsKind reference_kind) {
+ Node* ref_kind = __ Int32Constant(reference_kind);
+ Node* ret = __ Int32LessThan(ref_kind, kind);
+ return ret;
+}
+
+void EffectControlLinearizer::LowerTransitionAndStoreElement(Node* node) {
+ Node* array = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // Possibly transition array based on input and store.
+ //
+ // -- TRANSITION PHASE -----------------
+ // kind = ElementsKind(array)
+ // if value is not smi {
+ // if kind == HOLEY_SMI_ELEMENTS {
+ // if value is heap number {
+ // Transition array to HOLEY_DOUBLE_ELEMENTS
+ // kind = HOLEY_DOUBLE_ELEMENTS
+ // } else {
+ // Transition array to HOLEY_ELEMENTS
+ // kind = HOLEY_ELEMENTS
+ // }
+ // } else if kind == HOLEY_DOUBLE_ELEMENTS {
+ // if value is not heap number {
+ // Transition array to HOLEY_ELEMENTS
+ // kind = HOLEY_ELEMENTS
+ // }
+ // }
+ // }
+ //
+ // -- STORE PHASE ----------------------
+ // [make sure {kind} is up-to-date]
+ // if kind == HOLEY_DOUBLE_ELEMENTS {
+ // if value is smi {
+ // float_value = convert smi to float
+ // Store array[index] = float_value
+ // } else {
+ // float_value = value
+ // Store array[index] = float_value
+ // }
+ // } else {
+ // // kind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS
+ // Store array[index] = value
+ // }
+ //
+ Node* map = __ LoadField(AccessBuilder::ForMap(), array);
+ Node* kind;
+ {
+ Node* bit_field2 = __ LoadField(AccessBuilder::ForMapBitField2(), map);
+ Node* mask = __ Int32Constant(Map::ElementsKindBits::kMask);
+ Node* andit = __ Word32And(bit_field2, mask);
+ Node* shift = __ Int32Constant(Map::ElementsKindBits::kShift);
+ kind = __ Word32Shr(andit, shift);
+ }
+
+ auto do_store = __ MakeLabel<6>(MachineRepresentation::kWord32);
+ Node* check1 = ObjectIsSmi(value);
+ __ GotoIf(check1, &do_store, kind);
+ {
+ // {value} is a HeapObject.
+ Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS);
+ auto if_array_not_fast_smi = __ MakeLabel<1>();
+ __ GotoIf(check2, &if_array_not_fast_smi);
+ {
+ // Transition {array} from HOLEY_SMI_ELEMENTS to HOLEY_DOUBLE_ELEMENTS or
+ // to HOLEY_ELEMENTS.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check3 = __ WordEqual(value_map, heap_number_map);
+ auto if_value_not_heap_number = __ MakeLabel<1>();
+ __ GotoUnless(check3, &if_value_not_heap_number);
+ {
+ // {value} is a HeapNumber.
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS,
+ HOLEY_DOUBLE_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS));
+ }
+ __ Bind(&if_value_not_heap_number);
+ {
+ TransitionElementsTo(node, array, HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
+ }
+ }
+ __ Bind(&if_array_not_fast_smi);
+ {
+ Node* check3 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
+ __ GotoUnless(check3, &do_store, kind);
+ // We have double elements kind.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* heap_number_map = __ HeapNumberMapConstant();
+ Node* check4 = __ WordEqual(value_map, heap_number_map);
+ __ GotoIf(check4, &do_store, kind);
+ // But the value is not a heap number, so we must transition.
+ TransitionElementsTo(node, array, HOLEY_DOUBLE_ELEMENTS, HOLEY_ELEMENTS);
+ __ Goto(&do_store, __ Int32Constant(HOLEY_ELEMENTS));
+ }
+ }
+
+ // Make sure kind is up-to-date.
+ __ Bind(&do_store);
+ kind = do_store.PhiAt(0);
+
+ Node* elements = __ LoadField(AccessBuilder::ForJSObjectElements(), array);
+ Node* check2 = IsElementsKindGreaterThan(kind, HOLEY_ELEMENTS);
+ auto if_kind_is_double = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>();
+ __ GotoIf(check2, &if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_SMI_ELEMENTS or HOLEY_ELEMENTS.
+ __ StoreElement(AccessBuilder::ForFixedArrayElement(HOLEY_ELEMENTS),
+ elements, index, value);
+ __ Goto(&done);
+ }
+ __ Bind(&if_kind_is_double);
+ {
+ // Our ElementsKind is HOLEY_DOUBLE_ELEMENTS.
+ Node* check1 = ObjectIsSmi(value);
+ auto do_double_store = __ MakeLabel<1>();
+ __ GotoUnless(check1, &do_double_store);
+ {
+ Node* int_value = ChangeSmiToInt32(value);
+ Node* float_value = __ ChangeInt32ToFloat64(int_value);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
+ index, float_value);
+ __ Goto(&done);
+ }
+ __ Bind(&do_double_store);
+ {
+ Node* float_value =
+ __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ StoreElement(AccessBuilder::ForFixedDoubleArrayElement(), elements,
+ index, float_value);
+ __ Goto(&done);
+ }
+ }
+ __ Bind(&done);
+}
+
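Editorial note: the TRANSITION PHASE pseudo-code above boils down to a small decision table. Below is a hedged, standalone C++ sketch of that table, with plain control flow standing in for the graph nodes the lowering actually emits; the names are illustrative.

// Illustrative stand-ins for the three ElementsKinds the lowering deals with.
enum class KindSketch { kHoleySmi, kHoleyTagged, kHoleyDouble };

KindSketch KindAfterStore(KindSketch kind, bool value_is_smi,
                          bool value_is_heap_number) {
  if (value_is_smi) return kind;  // A Smi fits in every kind; no transition.
  if (kind == KindSketch::kHoleySmi) {
    // HOLEY_SMI_ELEMENTS -> HOLEY_DOUBLE_ELEMENTS or HOLEY_ELEMENTS.
    return value_is_heap_number ? KindSketch::kHoleyDouble
                                : KindSketch::kHoleyTagged;
  }
  if (kind == KindSketch::kHoleyDouble && !value_is_heap_number) {
    // A non-number HeapObject forces HOLEY_DOUBLE_ELEMENTS -> HOLEY_ELEMENTS.
    return KindSketch::kHoleyTagged;
  }
  return kind;  // HOLEY_ELEMENTS already holds any tagged value.
}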
Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
@@ -2987,6 +3301,29 @@ Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
return Just(done.PhiAt(0));
}
+Node* EffectControlLinearizer::LowerLookupHashStorageIndex(Node* node) {
+ Node* table = NodeProperties::GetValueInput(node, 0);
+ Node* key = NodeProperties::GetValueInput(node, 1);
+
+ {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kMapLookupHashIndex);
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), table, key,
+ __ NoContextConstant());
+ }
+}
+
+Node* EffectControlLinearizer::LowerLoadHashMapValue(Node* node) {
+ Node* table = NodeProperties::GetValueInput(node, 0);
+ Node* index = NodeProperties::GetValueInput(node, 1);
+ return __ LoadElement(AccessBuilder::ForFixedArrayElement(), table, index);
+}
+
#undef __
Factory* EffectControlLinearizer::factory() const {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index bc18ff8162..3cde8d795d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -58,6 +58,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerCheckNumber(Node* node, Node* frame_state);
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckSeqString(Node* node, Node* frame_state);
+ Node* LowerCheckNonEmptyString(Node* node, Node* frame_state);
+ Node* LowerCheckSymbol(Node* node, Node* frame_state);
Node* LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
@@ -96,6 +99,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
+ Node* LowerSeqStringCharCodeAt(Node* node);
+ Node* LowerStringToLowerCaseIntl(Node* node);
+ Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
@@ -103,7 +109,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
- Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+ Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -113,6 +119,9 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadTypedElement(Node* node);
void LowerStoreTypedElement(Node* node);
+ Node* LowerLookupHashStorageIndex(Node* node);
+ Node* LowerLoadHashMapValue(Node* node);
+ void LowerTransitionAndStoreElement(Node* node);
// Lowering of optional operators.
Maybe<Node*> LowerFloat64RoundUp(Node* node);
@@ -128,6 +137,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
Node* LowerStringComparison(Callable const& callable, Node* node);
+ Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
@@ -136,6 +146,8 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
+ void TransitionElementsTo(Node* node, Node* array, ElementsKind from,
+ ElementsKind to);
Factory* factory() const;
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 4373fa4c66..9bbabeb221 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -24,6 +24,8 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis, Zone* zone);
+ const char* reducer_name() const override { return "EscapeAnalysisReducer"; }
+
Reduction Reduce(Node* node) final;
void Finalize() override;
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 52935e0041..97710de6c5 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -833,7 +833,10 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kPlainPrimitiveToFloat64:
case IrOpcode::kStringCharAt:
case IrOpcode::kStringCharCodeAt:
+ case IrOpcode::kSeqStringCharCodeAt:
case IrOpcode::kStringIndexOf:
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl:
case IrOpcode::kObjectIsDetectableCallable:
case IrOpcode::kObjectIsNaN:
case IrOpcode::kObjectIsNonCallable:
@@ -857,13 +860,9 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
}
break;
default:
- if (use->op()->EffectInputCount() == 0 &&
- uses->op()->EffectInputCount() > 0 &&
- !IrOpcode::IsJsOpcode(use->opcode())) {
- V8_Fatal(__FILE__, __LINE__,
- "Encountered unaccounted use by #%d (%s)\n", use->id(),
- use->op()->mnemonic());
- }
+ DCHECK(use->op()->EffectInputCount() > 0 ||
+ uses->op()->EffectInputCount() == 0 ||
+ IrOpcode::IsJsOpcode(use->opcode()));
if (SetEscaped(rep)) {
TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
rep->id(), rep->op()->mnemonic(), use->id(),
@@ -1532,8 +1531,8 @@ void EscapeAnalysis::ProcessCheckMaps(Node* node) {
// CheckMapsValue operator that takes the load-eliminated map value as
// input.
if (value->opcode() == IrOpcode::kHeapConstant &&
- params.maps().contains(ZoneHandleSet<Map>(
- Handle<Map>::cast(OpParameter<Handle<HeapObject>>(value))))) {
+ params.maps().contains(ZoneHandleSet<Map>(bit_cast<Handle<Map>>(
+ OpParameter<Handle<HeapObject>>(value))))) {
TRACE("CheckMaps #%i seems to be redundant (until now).\n",
node->id());
return;
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index ec014dac94..4031f38186 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -5,6 +5,10 @@
#include "src/compiler/frame-states.h"
#include "src/base/functional.h"
+#include "src/callable.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
@@ -13,20 +17,14 @@ namespace internal {
namespace compiler {
size_t hash_value(OutputFrameStateCombine const& sc) {
- return base::hash_combine(sc.kind_, sc.parameter_);
+ return base::hash_value(sc.parameter_);
}
std::ostream& operator<<(std::ostream& os, OutputFrameStateCombine const& sc) {
- switch (sc.kind_) {
- case OutputFrameStateCombine::kPushOutput:
- if (sc.parameter_ == 0) return os << "Ignore";
- return os << "Push(" << sc.parameter_ << ")";
- case OutputFrameStateCombine::kPokeAt:
- return os << "PokeAt(" << sc.parameter_ << ")";
- }
- UNREACHABLE();
- return os;
+ if (sc.parameter_ == OutputFrameStateCombine::kInvalidIndex)
+ return os << "Ignore";
+ return os << "PokeAt(" << sc.parameter_ << ")";
}
@@ -50,21 +48,21 @@ size_t hash_value(FrameStateInfo const& info) {
std::ostream& operator<<(std::ostream& os, FrameStateType type) {
switch (type) {
- case FrameStateType::kJavaScriptFunction:
- os << "JS_FRAME";
- break;
case FrameStateType::kInterpretedFunction:
os << "INTERPRETED_FRAME";
break;
case FrameStateType::kArgumentsAdaptor:
os << "ARGUMENTS_ADAPTOR";
break;
- case FrameStateType::kTailCallerFunction:
- os << "TAIL_CALLER_FRAME";
- break;
case FrameStateType::kConstructStub:
os << "CONSTRUCT_STUB";
break;
+ case FrameStateType::kBuiltinContinuation:
+ os << "BUILTIN_CONTINUATION_FRAME";
+ break;
+ case FrameStateType::kJavaScriptBuiltinContinuation:
+ os << "JAVA_SCRIPT_BUILTIN_CONTINUATION_FRAME";
+ break;
case FrameStateType::kGetterStub:
os << "GETTER_STUB";
break;
@@ -86,6 +84,116 @@ std::ostream& operator<<(std::ostream& os, FrameStateInfo const& info) {
return os;
}
+namespace {
+Node* CreateBuiltinContinuationFrameStateCommon(
+ JSGraph* js_graph, Builtins::Name name, Node* context, Node** parameters,
+ int parameter_count, Node* outer_frame_state, Handle<JSFunction> function) {
+ Isolate* isolate = js_graph->isolate();
+ Graph* graph = js_graph->graph();
+ CommonOperatorBuilder* common = js_graph->common();
+
+ BailoutId bailout_id = Builtins::GetContinuationBailoutId(name);
+ Callable callable = Builtins::CallableFor(isolate, name);
+
+ const Operator* op_param =
+ common->StateValues(parameter_count, SparseInputMask::Dense());
+ Node* params_node = graph->NewNode(op_param, parameter_count, parameters);
+
+ FrameStateType frame_type =
+ function.is_null() ? FrameStateType::kBuiltinContinuation
+ : FrameStateType::kJavaScriptBuiltinContinuation;
+ const FrameStateFunctionInfo* state_info =
+ common->CreateFrameStateFunctionInfo(
+ frame_type, parameter_count, 0,
+ function.is_null() ? Handle<SharedFunctionInfo>()
+ : Handle<SharedFunctionInfo>(function->shared()));
+ const Operator* op = common->FrameState(
+ bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+
+ Node* function_node = function.is_null() ? js_graph->UndefinedConstant()
+ : js_graph->HeapConstant(function);
+
+ Node* frame_state = graph->NewNode(
+ op, params_node, js_graph->EmptyStateValues(),
+ js_graph->EmptyStateValues(), context, function_node, outer_frame_state);
+
+ return frame_state;
+}
+} // namespace
+
+Node* CreateStubBuiltinContinuationFrameState(JSGraph* js_graph,
+ Builtins::Name name,
+ Node* context, Node** parameters,
+ int parameter_count,
+ Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = js_graph->isolate();
+ Callable callable = Builtins::CallableFor(isolate, name);
+ CallInterfaceDescriptor descriptor = callable.descriptor();
+
+ std::vector<Node*> actual_parameters;
+ // Stack parameters first. If the deoptimization is LAZY, the final parameter
+ // is added by the deoptimizer and isn't explicitly passed in the frame state.
+ int stack_parameter_count =
+ descriptor.GetRegisterParameterCount() -
+ (mode == ContinuationFrameStateMode::LAZY ? 1 : 0);
+ for (int i = 0; i < stack_parameter_count; ++i) {
+ actual_parameters.push_back(
+ parameters[descriptor.GetRegisterParameterCount() + i]);
+ }
+  // Register parameters follow; the context will be added by the instruction
+  // selector during FrameState translation.
+ for (int i = 0; i < descriptor.GetRegisterParameterCount(); ++i) {
+ actual_parameters.push_back(parameters[i]);
+ }
+
+ return CreateBuiltinContinuationFrameStateCommon(
+ js_graph, name, context, actual_parameters.data(),
+ static_cast<int>(actual_parameters.size()), outer_frame_state,
+ Handle<JSFunction>());
+}
+
+Node* CreateJavaScriptBuiltinContinuationFrameState(
+ JSGraph* js_graph, Handle<JSFunction> function, Builtins::Name name,
+ Node* target, Node* context, Node** stack_parameters,
+ int stack_parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode) {
+ Isolate* isolate = js_graph->isolate();
+ Callable callable = Builtins::CallableFor(isolate, name);
+
+  // Lazy deopt points where the frame state is associated with a call get an
+  // additional parameter for the return result from the call that's added by
+  // the deoptimizer and not explicitly specified in the frame state. Check that
+  // there is no mismatch between the number of frame state parameters and the
+  // stack parameters required by the builtin, taking this into account.
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(isolate, name) + 1, // add receiver
+ stack_parameter_count +
+ (mode == ContinuationFrameStateMode::EAGER ? 0 : 1));
+
+ Node* argc =
+ js_graph->Constant(stack_parameter_count -
+ (mode == ContinuationFrameStateMode::EAGER ? 1 : 0));
+
+ // Stack parameters first. They must be first because the receiver is expected
+ // to be the second value in the translation when creating stack crawls
+ // (e.g. Error.stack) of optimized JavaScript frames.
+ std::vector<Node*> actual_parameters;
+ for (int i = 0; i < stack_parameter_count; ++i) {
+ actual_parameters.push_back(stack_parameters[i]);
+ }
+
+  // Register parameters follow stack parameters. The context will be added by
+  // the instruction selector during FrameState translation.
+ actual_parameters.push_back(target);
+ actual_parameters.push_back(js_graph->UndefinedConstant());
+ actual_parameters.push_back(argc);
+
+ return CreateBuiltinContinuationFrameStateCommon(
+ js_graph, name, context, &actual_parameters[0],
+ static_cast<int>(actual_parameters.size()), outer_frame_state, function);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index 0d0ec47f88..4e25fa026b 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -5,57 +5,43 @@
#ifndef V8_COMPILER_FRAME_STATES_H_
#define V8_COMPILER_FRAME_STATES_H_
+#include "src/builtins/builtins.h"
#include "src/handles.h"
+#include "src/objects/shared-function-info.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
-// Forward declarations.
-class SharedFunctionInfo;
-
namespace compiler {
+class JSGraph;
+class Node;
+
// Flag that describes how to combine the current environment with
// the output of a node to obtain a framestate for lazy bailout.
class OutputFrameStateCombine {
public:
- enum Kind {
- kPushOutput, // Push the output on the expression stack.
- kPokeAt // Poke at the given environment location,
- // counting from the top of the stack.
- };
+ static const size_t kInvalidIndex = SIZE_MAX;
static OutputFrameStateCombine Ignore() {
- return OutputFrameStateCombine(kPushOutput, 0);
- }
- static OutputFrameStateCombine Push(size_t count = 1) {
- return OutputFrameStateCombine(kPushOutput, count);
+ return OutputFrameStateCombine(kInvalidIndex);
}
static OutputFrameStateCombine PokeAt(size_t index) {
- return OutputFrameStateCombine(kPokeAt, index);
+ return OutputFrameStateCombine(index);
}
- Kind kind() const { return kind_; }
- size_t GetPushCount() const {
- DCHECK_EQ(kPushOutput, kind());
- return parameter_;
- }
size_t GetOffsetToPokeAt() const {
- DCHECK_EQ(kPokeAt, kind());
+ DCHECK_NE(parameter_, kInvalidIndex);
return parameter_;
}
- bool IsOutputIgnored() const {
- return kind_ == kPushOutput && parameter_ == 0;
- }
+ bool IsOutputIgnored() const { return parameter_ == kInvalidIndex; }
- size_t ConsumedOutputCount() const {
- return kind_ == kPushOutput ? GetPushCount() : 1;
- }
+ size_t ConsumedOutputCount() const { return IsOutputIgnored() ? 0 : 1; }
bool operator==(OutputFrameStateCombine const& other) const {
- return kind_ == other.kind_ && parameter_ == other.parameter_;
+ return parameter_ == other.parameter_;
}
bool operator!=(OutputFrameStateCombine const& other) const {
return !(*this == other);
@@ -66,23 +52,22 @@ class OutputFrameStateCombine {
OutputFrameStateCombine const&);
private:
- OutputFrameStateCombine(Kind kind, size_t parameter)
- : kind_(kind), parameter_(parameter) {}
+ explicit OutputFrameStateCombine(size_t parameter) : parameter_(parameter) {}
- Kind const kind_;
size_t const parameter_;
};
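Editorial note: with the Kind enum gone, the whole combine is a single size_t, and SIZE_MAX doubles as the "ignore" sentinel. A hedged usage sketch follows; CHECK/CHECK_EQ are V8's assertion macros, and the function is illustrative rather than part of this change.

void OutputFrameStateCombineSketch() {
  OutputFrameStateCombine ignore = OutputFrameStateCombine::Ignore();
  OutputFrameStateCombine poke = OutputFrameStateCombine::PokeAt(2);

  CHECK(ignore.IsOutputIgnored());           // parameter_ == kInvalidIndex
  CHECK_EQ(0u, ignore.ConsumedOutputCount());
  CHECK_EQ(2u, poke.GetOffsetToPokeAt());    // would DCHECK-fail on Ignore()
  CHECK_EQ(1u, poke.ConsumedOutputCount());
}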
// The type of stack frame that a FrameState node represents.
enum class FrameStateType {
- kJavaScriptFunction, // Represents an unoptimized JavaScriptFrame.
kInterpretedFunction, // Represents an InterpretedFrame.
kArgumentsAdaptor, // Represents an ArgumentsAdaptorFrame.
- kTailCallerFunction, // Represents a frame removed by tail call elimination.
kConstructStub, // Represents a ConstructStubFrame.
kGetterStub, // Represents a GetterStubFrame.
- kSetterStub // Represents a SetterStubFrame.
+ kSetterStub, // Represents a SetterStubFrame.
+ kBuiltinContinuation, // Represents a continuation to a stub.
+  kJavaScriptBuiltinContinuation  // Represents a continuation to a JavaScript
+                                  // builtin.
};
class FrameStateFunctionInfo {
@@ -101,8 +86,8 @@ class FrameStateFunctionInfo {
FrameStateType type() const { return type_; }
static bool IsJSFunctionType(FrameStateType type) {
- return type == FrameStateType::kJavaScriptFunction ||
- type == FrameStateType::kInterpretedFunction;
+ return type == FrameStateType::kInterpretedFunction ||
+ type == FrameStateType::kJavaScriptBuiltinContinuation;
}
private:
@@ -122,7 +107,7 @@ class FrameStateInfo final {
info_(info) {}
FrameStateType type() const {
- return info_ == nullptr ? FrameStateType::kJavaScriptFunction
+ return info_ == nullptr ? FrameStateType::kInterpretedFunction
: info_->type();
}
BailoutId bailout_id() const { return bailout_id_; }
@@ -160,6 +145,21 @@ static const int kFrameStateFunctionInput = 4;
static const int kFrameStateOuterStateInput = 5;
static const int kFrameStateInputCount = kFrameStateOuterStateInput + 1;
+enum class ContinuationFrameStateMode { EAGER, LAZY };
+
+Node* CreateStubBuiltinContinuationFrameState(JSGraph* graph,
+ Builtins::Name name,
+ Node* context, Node** parameters,
+ int parameter_count,
+ Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
+
+Node* CreateJavaScriptBuiltinContinuationFrameState(
+ JSGraph* graph, Handle<JSFunction> function, Builtins::Name name,
+ Node* target, Node* context, Node** stack_parameters,
+ int stack_parameter_count, Node* outer_frame_state,
+ ContinuationFrameStateMode mode);
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index be90a33a21..05131abeea 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -102,7 +102,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
}
if (!kSimpleFPAliasing) {
- if (reps && !base::bits::IsPowerOfTwo32(reps)) {
+ if (reps && !base::bits::IsPowerOfTwo(reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
if ((reps & kFloat32Bit) != 0) {
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 12746c2b13..a91b83a035 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -224,7 +224,8 @@ void GraphAssembler::Reset(Node* effect, Node* control) {
Operator const* GraphAssembler::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ Callable callable =
+ Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index cf4d9154e4..faf01e9d9e 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -89,11 +89,22 @@ Reduction GraphReducer::Reduce(Node* const node) {
// {replacement} == {node} represents an in-place reduction. Rerun
// all the other reducers for this node, as now there may be more
// opportunities for reduction.
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- In-place update of " << *node << " by reducer "
+ << (*i)->reducer_name() << std::endl;
+ }
skip = i;
i = reducers_.begin();
continue;
} else {
// {node} was replaced by another node.
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- Replacement of " << *node << " with "
+ << *(reduction.replacement()) << " by reducer "
+ << (*i)->reducer_name() << std::endl;
+ }
return reduction;
}
}
@@ -146,10 +157,6 @@ void GraphReducer::ReduceTop() {
// Check if the reduction is an in-place update of the {node}.
Node* const replacement = reduction.replacement();
if (replacement == node) {
- if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- In-place update of " << *replacement << std::endl;
- }
// In-place update of {node}, may need to recurse on an input.
Node::Inputs node_inputs = node->inputs();
for (int i = 0; i < node_inputs.count(); ++i) {
@@ -183,10 +190,6 @@ void GraphReducer::Replace(Node* node, Node* replacement) {
void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
- if (FLAG_trace_turbo_reduction) {
- OFStream os(stdout);
- os << "- Replacing " << *node << " with " << *replacement << std::endl;
- }
if (node == graph()->start()) graph()->SetStart(replacement);
if (node == graph()->end()) graph()->SetEnd(replacement);
if (replacement->id() <= max_id) {
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index d271881872..517f71e955 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -46,6 +46,9 @@ class V8_EXPORT_PRIVATE Reducer {
public:
virtual ~Reducer() {}
+ // Only used for tracing, when using the --trace_turbo_reduction flag.
+ virtual const char* reducer_name() const = 0;
+
// Try to reduce a node if possible.
virtual Reduction Reduce(Node* node) = 0;
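Editorial note: every reducer touched by this change gains the same one-line override of the new pure-virtual hook. A minimal sketch of a conforming subclass is shown below; MyReducer and its body are hypothetical, not part of this patch.

class MyReducer final : public Reducer {
 public:
  // Reported by --trace_turbo_reduction next to every reduction this class
  // performs (see the graph-reducer.cc hunk above).
  const char* reducer_name() const override { return "MyReducer"; }

  Reduction Reduce(Node* node) final {
    // A real reducer would pattern-match on node->opcode() here.
    return NoChange();
  }
};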
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index 60af4789bc..3c5c9c4de8 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -5,6 +5,8 @@
#ifndef V8_COMPILER_GRAPH_H_
#define V8_COMPILER_GRAPH_H_
+#include <array>
+
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -62,58 +64,11 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
Node* NewNode(const Operator* op, int input_count, Node* const* inputs,
bool incomplete = false);
- // Factories for nodes with static input counts.
- Node* NewNode(const Operator* op) {
- return NewNode(op, 0, static_cast<Node* const*>(nullptr));
- }
- Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* nodes[] = {n1, n2};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* nodes[] = {n1, n2, n3};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* nodes[] = {n1, n2, n3, n4};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* nodes[] = {n1, n2, n3, n4, n5};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
- return NewNode(op, arraysize(nodes), nodes);
- }
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
- Node* n11) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
- return NewNode(op, arraysize(nodes), nodes);
+ // Factory template for nodes with static input counts.
+ template <typename... Nodes>
+ Node* NewNode(const Operator* op, Nodes*... nodes) {
+ std::array<Node*, sizeof...(nodes)> nodes_arr{{nodes...}};
+ return NewNode(op, nodes_arr.size(), nodes_arr.data());
}
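Editorial note: the eleven fixed-arity overloads collapse into a single parameter pack that funnels into the count-plus-pointer factory. A standalone sketch of the same pattern, with stub Operator/Node types so it compiles outside V8:

#include <array>
#include <cstdio>

struct Operator {};
struct Node {};

// The count-plus-pointer overload every call funnels into.
Node* NewNode(const Operator* op, int input_count, Node* const* inputs) {
  std::printf("NewNode with %d inputs\n", input_count);
  return nullptr;
}

// One template replaces NewNode(op), NewNode(op, n1), ..., NewNode(op, n1..n11).
template <typename... Nodes>
Node* NewNode(const Operator* op, Nodes*... nodes) {
  std::array<Node*, sizeof...(nodes)> arr{{nodes...}};
  return NewNode(op, static_cast<int>(arr.size()), arr.data());
}

int main() {
  Operator op;
  Node a, b, c;
  NewNode(&op, &a, &b, &c);  // Prints "NewNode with 3 inputs".
}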
// Clone the {node}, and assign a new node id to the copy.
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index dabdab3810..2c20ae9ddb 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -18,8 +18,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchDoubleReg xmm0
@@ -74,11 +73,9 @@ class IA32OperandConverter : public InstructionOperandConverter {
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kFloat32:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Immediate::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Immediate::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
@@ -89,7 +86,6 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate::CodeRelativeOffset(ToLabel(operand));
}
UNREACHABLE();
- return Immediate(-1);
}
static size_t NextOffset(size_t* offset) {
@@ -165,10 +161,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
}
case kMode_None:
UNREACHABLE();
- return Operand(no_reg, 0);
}
UNREACHABLE();
- return Operand(no_reg, 0);
}
Operand MemoryOperand(size_t first_input = 0) {
@@ -226,18 +220,22 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
- : OutOfLineCode(gen), result_(result), input_(input) {}
+ : OutOfLineCode(gen),
+ result_(result),
+ input_(input),
+ zone_(gen->zone()) {}
void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
- __ SlowTruncateToI(result_, esp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_, esp, 0);
__ add(esp, Immediate(kDoubleSize));
}
private:
Register const result_;
XMMRegister const input_;
+ Zone* zone_;
};
@@ -252,7 +250,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -266,10 +265,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ lea(scratch1_, operand_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
private:
@@ -279,6 +278,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ Zone* zone_;
};
} // namespace
@@ -729,35 +729,35 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- /* Pass two doubles as arguments on the stack. */ \
- __ PrepareCallCFunction(4, eax); \
- __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
- __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 4); \
- /* Return value is in st(0) on ia32. */ \
- /* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
- __ fstp_d(Operand(esp, 0)); \
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
- __ add(esp, Immediate(kDoubleSize)); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ /* Pass two doubles as arguments on the stack. */ \
+ __ PrepareCallCFunction(4, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 4); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
} while (false)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(2, eax); \
- __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
- /* Return value is in st(0) on ia32. */ \
- /* Store it into the result register. */ \
- __ sub(esp, Immediate(kDoubleSize)); \
- __ fstp_d(Operand(esp, 0)); \
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
- __ add(esp, Immediate(kDoubleSize)); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ /* Pass one double as argument on the stack. */ \
+ __ PrepareCallCFunction(2, eax); \
+ __ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
+ /* Return value is in st(0) on ia32. */ \
+ /* Store it into the result register. */ \
+ __ sub(esp, Immediate(kDoubleSize)); \
+ __ fstp_d(Operand(esp, 0)); \
+ __ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
+ __ add(esp, Immediate(kDoubleSize)); \
} while (false)
#define ASSEMBLE_BINOP(asm_instr) \
@@ -839,7 +839,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -847,10 +847,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
+ tasm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
+ tasm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -871,7 +871,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -889,13 +889,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -909,7 +909,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -927,7 +927,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
no_reg, no_reg, no_reg);
}
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1139,8 +1139,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movaps(xmm1, xmm2);
__ movaps(xmm2, xmm0);
}
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ movaps(i.OutputDoubleRegister(), xmm3);
break;
}
@@ -1375,7 +1375,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEFloat32Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1553,7 +1553,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1630,25 +1630,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
@@ -1657,25 +1657,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
@@ -1687,7 +1687,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1695,7 +1695,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1703,7 +1703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1711,7 +1711,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
@@ -1897,38 +1897,92 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
- __ movd(dst, i.InputOperand(0));
- __ pshufd(dst, dst, 0x0);
+ __ Movd(dst, i.InputOperand(0));
+ __ Pshufd(dst, dst, 0x0);
break;
}
case kIA32I32x4ExtractLane: {
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
- case kIA32I32x4ReplaceLane: {
- __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ case kSSEI32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
- case kSSEI32x4Add: {
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kAVXI32x4ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
break;
}
- case kSSEI32x4Sub: {
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kSSEI32x4Add: {
+ __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
+ case kSSEI32x4Sub: {
+ __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
+ case kIA32I16x8Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Movd(dst, i.InputOperand(0));
+ __ Pshuflw(dst, dst, 0x0);
+ __ Pshufd(dst, dst, 0x0);
+ break;
+ }
+ case kIA32I16x8ExtractLane: {
+ Register dst = i.OutputRegister();
+ __ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsx_w(dst, dst);
+ break;
+ }
+ case kSSEI16x8ReplaceLane: {
+ __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kAVXI16x8ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kIA32I8x16Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Movd(dst, i.InputOperand(0));
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ __ Pshufb(dst, kScratchDoubleReg);
+ break;
+ }
+ case kIA32I8x16ExtractLane: {
+ Register dst = i.OutputRegister();
+ __ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsx_b(dst, dst);
+ break;
+ }
+ case kSSEI8x16ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
+ case kAVXI8x16ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpinsrb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
break;
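Aside: the kIA32I16x8ExtractLane and kIA32I8x16ExtractLane cases added above pair a lane extract (pextrw/pextrb) with an explicit sign-extension (movsx_w/movsx_b), because the extract instructions zero-extend. A rough scalar model of the 16-bit case follows; the function name is illustrative only, not V8 API.

#include <cstdint>

// Scalar model of the pextrw + movsx_w pair emitted above: pextrw hands back
// the lane zero-extended, so a sign-extension is needed to form a proper int32.
int32_t ExtractLaneI16x8(const int16_t lanes[8], int lane) {
  uint16_t raw = static_cast<uint16_t>(lanes[lane]);  // pextrw: zero-extended
  return static_cast<int16_t>(raw);                   // movsx_w: sign-extend
}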
@@ -1967,7 +2021,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kIA32StackCheck: {
ExternalReference const stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
+ ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
@@ -2115,7 +2169,6 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
default:
UNREACHABLE();
- return no_condition;
break;
}
}
@@ -2174,22 +2227,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0, esi);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- __ ud2();
- }
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
}
}
@@ -2284,9 +2335,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2464,11 +2515,45 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves = descriptor->CalleeSavedRegisters();
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ Register scratch = esi;
+ __ push(scratch);
+ __ mov(scratch,
+ Immediate(ExternalReference::address_of_real_stack_limit(
+ __ isolate())));
+ __ mov(scratch, Operand(scratch, 0));
+ __ add(scratch, Immediate(shrink_slots * kPointerSize));
+ __ cmp(esp, scratch);
+ __ pop(scratch);
+ __ j(above_equal, &done);
+ }
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ __ Move(esi, Smi::kZero);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ bind(&done);
+ }
__ sub(esp, Immediate(shrink_slots * kPointerSize));
}
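Aside: the block above performs the stack-overflow check before constructing large WebAssembly frames. A standalone sketch of the decision logic, assuming hypothetical constants (kPointerSize of 4 for ia32, the 128-slot threshold from the hunk, and the stack size flag expressed in KiB); the real code emits ia32 instructions rather than returning an enum.

constexpr int kPointerSize = 4;        // ia32 pointer width, assumed here
constexpr int kLargeFrameSlots = 128;  // threshold used in the hunk above

enum class FramePrologue {
  kJustAllocate,          // small frame: subtract from esp directly
  kCheckStackLimitFirst,  // compare esp against real_stack_limit + frame size
  kThrowUnconditionally   // frame larger than the whole stack
};

FramePrologue ClassifyWasmFrame(bool is_wasm, int shrink_slots,
                                int stack_size_kib) {
  if (!is_wasm || shrink_slots <= kLargeFrameSlots)
    return FramePrologue::kJustAllocate;
  // Throwing unconditionally for frames bigger than the whole stack avoids an
  // integer-overflow check in the limit comparison, as the comment above notes.
  if (shrink_slots * kPointerSize >= stack_size_kib * 1024)
    return FramePrologue::kThrowUnconditionally;
  return FramePrologue::kCheckStackLimitFirst;
}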
@@ -2561,17 +2646,11 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Handle<HeapObject> src = src_constant.ToHeapObject();
if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
- __ LoadHeapObject(dst, src);
+ __ Move(dst, src);
} else {
DCHECK(destination->IsStackSlot());
Operand dst = g.ToOperand(destination);
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*src)) {
- __ PushHeapObject(src);
- __ pop(dst);
- } else {
- __ mov(dst, src);
- }
+ __ mov(dst, src);
}
} else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
@@ -2592,7 +2671,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64AsInt();
+ uint64_t src = src_constant.ToFloat64().AsUint64();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
@@ -2771,7 +2850,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 8bdfd0988d..67c141ebce 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -113,11 +113,20 @@ namespace compiler {
V(IA32StackCheck) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
- V(IA32I32x4ReplaceLane) \
+ V(SSEI32x4ReplaceLane) \
+ V(AVXI32x4ReplaceLane) \
V(SSEI32x4Add) \
- V(SSEI32x4Sub) \
V(AVXI32x4Add) \
- V(AVXI32x4Sub)
+ V(SSEI32x4Sub) \
+ V(AVXI32x4Sub) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLane) \
+ V(SSEI16x8ReplaceLane) \
+ V(AVXI16x8ReplaceLane) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLane) \
+ V(SSEI8x16ReplaceLane) \
+ V(AVXI8x16ReplaceLane)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 68db94fcff..9286e0febc 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -99,11 +99,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32BitcastIF:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
- case kIA32I32x4ReplaceLane:
+ case kSSEI32x4ReplaceLane:
+ case kAVXI32x4ReplaceLane:
case kSSEI32x4Add:
- case kSSEI32x4Sub:
case kAVXI32x4Add:
+ case kSSEI32x4Sub:
case kAVXI32x4Sub:
+ case kIA32I16x8Splat:
+ case kIA32I16x8ExtractLane:
+ case kSSEI16x8ReplaceLane:
+ case kAVXI16x8ReplaceLane:
+ case kIA32I8x16Splat:
+ case kIA32I8x16ExtractLane:
+ case kSSEI8x16ReplaceLane:
+ case kAVXI8x16ReplaceLane:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -111,8 +120,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Idiv:
case kIA32Udiv:
return (instr->addressing_mode() == kMode_None)
- ? kMayNeedDeoptCheck
- : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+ ? kMayNeedDeoptOrTrapCheck
+ : kMayNeedDeoptOrTrapCheck | kIsLoadOperation | kHasSideEffect;
case kIA32Movsxbl:
case kIA32Movzxbl:
@@ -143,7 +152,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index dccfced9e1..caf7abcbfc 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -247,9 +247,6 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -340,9 +337,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -410,9 +404,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -486,9 +477,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -900,9 +888,7 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(I32x4Add, kAVXI32x4Add, kSSEI32x4Add) \
- V(I32x4Sub, kAVXI32x4Sub, kSSEI32x4Sub)
+ V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
#define FLOAT_UNOP_LIST(V) \
V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
@@ -1542,6 +1528,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -1549,7 +1536,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
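Aside: the new kMaxTableSwitchValueRange bound above joins the existing space/time cost comparison. A self-contained sketch of the choice, assuming a lookup_time_cost term that scales with the case count (that line is not visible in the hunk); all names are local to the example.

#include <cstddef>
#include <cstdint>
#include <limits>

// Simplified model of the table-vs-lookup switch decision shown above.
bool ShouldEmitTableSwitch(size_t case_count, uint32_t value_range,
                           int32_t min_value) {
  static const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;  // assumed shape; not shown in the hunk
  return case_count > 4 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min() &&
         value_range <= kMaxTableSwitchValueRange;
}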
@@ -1905,24 +1893,55 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRO(this, node, kIA32I32x4Splat);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- IA32OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kIA32I32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- IA32OperandGenerator g(this);
- int32_t lane = OpParameter<int32_t>(node);
- Emit(kIA32I32x4ReplaceLane, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
- g.Use(node->InputAt(1)));
-}
+#define SIMD_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_BINOP_LIST(V) \
+ V(I32x4Add) \
+ V(I32x4Sub)
+
+#define VISIT_SIMD_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRO(this, node, kIA32##Type##Splat); \
+ }
+SIMD_TYPES(VISIT_SIMD_SPLAT)
+#undef VISIT_SIMD_SPLAT
+
+#define VISIT_SIMD_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ int32_t lane = OpParameter<int32_t>(node); \
+ Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
+ }
+SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+#undef VISIT_SIMD_EXTRACT_LANE
+
+#define VISIT_SIMD_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node)); \
+ InstructionOperand operand2 = g.Use(node->InputAt(1)); \
+ if (IsSupported(AVX)) { \
+ Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
+ operand1, operand2); \
+ } else { \
+ Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
+ operand1, operand2); \
+ } \
+ }
+SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+#undef VISIT_SIMD_REPLACE_LANE
+
+#define VISIT_SIMD_BINOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ VisitRROFloat(this, node, kAVX##Opcode, kSSE##Opcode); \
+ }
+SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
+#undef VISIT_SIMD_BINOP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
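Aside: the VISIT_SIMD_REPLACE_LANE macro above picks the AVX opcode when available because vpinsr* writes a separate destination, while the SSE pinsr* form overwrites its first input and therefore pins the output to that register (DefineAsRegister vs DefineSameAsFirst). A toy sketch of just that selection, using stand-in enums rather than real V8 types.

// Stand-ins for the instruction codes and output constraints used by the
// macro above; only the AVX/SSE selection shape is modelled here.
enum class Opcode { kSSEI32x4ReplaceLane, kAVXI32x4ReplaceLane };
enum class OutputPolicy { kDefineAsRegister, kDefineSameAsFirst };

struct Selection {
  Opcode opcode;
  OutputPolicy output;
};

Selection SelectI32x4ReplaceLane(bool avx_supported) {
  if (avx_supported) {
    // vpinsrd writes a fresh destination, so the output may take any register.
    return {Opcode::kAVXI32x4ReplaceLane, OutputPolicy::kDefineAsRegister};
  }
  // pinsrd modifies its first operand in place, so the output must reuse it.
  return {Opcode::kSSEI32x4ReplaceLane, OutputPolicy::kDefineSameAsFirst};
}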
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index d4e0449ad9..df7a03163d 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -23,8 +23,6 @@
#include "src/compiler/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/s390/instruction-codes-s390.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/compiler/x87/instruction-codes-x87.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index cb3c2d66c6..e311abb2a2 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -77,7 +77,6 @@ void InstructionScheduler::ScheduleGraphNode::AddSuccessor(
node->unscheduled_predecessors_count_++;
}
-
InstructionScheduler::InstructionScheduler(Zone* zone,
InstructionSequence* sequence)
: zone_(zone),
@@ -86,16 +85,15 @@ InstructionScheduler::InstructionScheduler(Zone* zone,
last_side_effect_instr_(nullptr),
pending_loads_(zone),
last_live_in_reg_marker_(nullptr),
- last_deopt_(nullptr),
+ last_deopt_or_trap_(nullptr),
operands_map_(zone) {}
-
void InstructionScheduler::StartBlock(RpoNumber rpo) {
DCHECK(graph_.empty());
DCHECK(last_side_effect_instr_ == nullptr);
DCHECK(pending_loads_.empty());
DCHECK(last_live_in_reg_marker_ == nullptr);
- DCHECK(last_deopt_ == nullptr);
+ DCHECK(last_deopt_or_trap_ == nullptr);
DCHECK(operands_map_.empty());
sequence()->StartBlock(rpo);
}
@@ -112,7 +110,7 @@ void InstructionScheduler::EndBlock(RpoNumber rpo) {
last_side_effect_instr_ = nullptr;
pending_loads_.clear();
last_live_in_reg_marker_ = nullptr;
- last_deopt_ = nullptr;
+ last_deopt_or_trap_ = nullptr;
operands_map_.clear();
}
@@ -137,9 +135,9 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
// Make sure that instructions are not scheduled before the last
- // deoptimization point when they depend on it.
- if ((last_deopt_ != nullptr) && DependsOnDeoptimization(instr)) {
- last_deopt_->AddSuccessor(new_node);
+ // deoptimization or trap point when they depend on it.
+ if ((last_deopt_or_trap_ != nullptr) && DependsOnDeoptOrTrap(instr)) {
+ last_deopt_or_trap_->AddSuccessor(new_node);
}
// Instructions with side effects and memory operations can't be
@@ -160,13 +158,13 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
- } else if (instr->IsDeoptimizeCall()) {
- // Ensure that deopts are not reordered with respect to side-effect
- // instructions.
+ } else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
+ // Ensure that deopts or traps are not reordered with respect to
+ // side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
- last_deopt_ = new_node;
+ last_deopt_or_trap_ = new_node;
}
// Look for operand dependencies.
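Aside: the scheduler changes above widen the "last deopt" ordering barrier so it also covers traps. A miniature model of the dependency rule, using a stand-in struct of booleans (the real scheduler derives these from opcode flags instead).

// Stand-in for the scheduler's instruction, reduced to the predicates that
// matter for the deopt/trap ordering rule introduced above.
struct SchedInstr {
  bool is_deopt = false;
  bool is_trap = false;
  bool has_side_effect = false;
  bool is_load = false;
  bool may_need_deopt_or_trap_check = false;  // e.g. ia32 idiv/udiv
};

// True if the instruction must stay after the last deoptimization or trap
// point encountered while building the scheduling graph.
bool DependsOnDeoptOrTrap(const SchedInstr& instr) {
  return instr.may_need_deopt_or_trap_check || instr.is_deopt ||
         instr.is_trap || instr.has_side_effect || instr.is_load;
}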
@@ -244,7 +242,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchParentFramePointer:
case kArchTruncateDoubleToI:
case kArchStackSlot:
- case kArchDebugBreak:
case kArchComment:
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
@@ -292,6 +289,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchLookupSwitch:
case kArchTableSwitch:
case kArchRet:
+ case kArchDebugBreak:
case kArchThrowTerminator:
return kIsBlockTerminator;
@@ -370,7 +368,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index 7660520b6d..db2894a92a 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -21,10 +21,11 @@ enum ArchOpcodeFlags {
kHasSideEffect = 2, // The instruction has some side effects (memory
// store, function call...)
kIsLoadOperation = 4, // The instruction is a memory load.
- kMayNeedDeoptCheck = 8, // The instruction might be associated with a deopt
- // check. This is the case of instruction which can
- // blow up with particular inputs (e.g.: division by
- // zero on Intel platforms).
+  kMayNeedDeoptOrTrapCheck = 8,  // The instruction may be associated with a
+  // deopt or trap check that must run before the instruction, e.g. div on
+  // Intel platforms, which raises an exception when the divisor is zero.

};
class InstructionScheduler final : public ZoneObject {
@@ -166,17 +167,22 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
- // Return true if this instruction is usually associated with a deopt check
- // to validate its input.
- bool MayNeedDeoptCheck(const Instruction* instr) const {
- return (GetInstructionFlags(instr) & kMayNeedDeoptCheck) != 0;
+ // The scheduler will not move the following instructions before the last
+ // deopt/trap check:
+ // * loads (this is conservative)
+  //  * instructions with side effects
+ // * other deopts/traps
+ // Any other instruction can be moved, apart from those that raise exceptions
+ // on specific inputs - these are filtered out by the deopt/trap check.
+ bool MayNeedDeoptOrTrapCheck(const Instruction* instr) const {
+ return (GetInstructionFlags(instr) & kMayNeedDeoptOrTrapCheck) != 0;
}
- // Return true if the instruction cannot be moved before the last deopt
- // point we encountered.
- bool DependsOnDeoptimization(const Instruction* instr) const {
- return MayNeedDeoptCheck(instr) || instr->IsDeoptimizeCall() ||
- HasSideEffect(instr) || IsLoadOperation(instr);
+ // Return true if the instruction cannot be moved before the last deopt or
+ // trap point we encountered.
+ bool DependsOnDeoptOrTrap(const Instruction* instr) const {
+ return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
+ instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
@@ -217,8 +223,9 @@ class InstructionScheduler final : public ZoneObject {
// other instructions in the basic block.
ScheduleGraphNode* last_live_in_reg_marker_;
- // Last deoptimization instruction encountered while building the graph.
- ScheduleGraphNode* last_deopt_;
+ // Last deoptimization or trap instruction encountered while building the
+ // graph.
+ ScheduleGraphNode* last_deopt_or_trap_;
// Keep track of definition points for virtual registers. This is used to
// record operand dependencies in the scheduling graph.
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index ecda453351..8334d1751a 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -255,7 +255,6 @@ class OperandGenerator {
break;
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
static Constant ToNegatedConstant(const Node* node) {
@@ -268,7 +267,6 @@ class OperandGenerator {
break;
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
UnallocatedOperand Define(Node* node, UnallocatedOperand operand) {
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 1d07799511..813372881e 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -302,10 +302,11 @@ int InstructionSelector::GetRename(int virtual_register) {
void InstructionSelector::TryRename(InstructionOperand* op) {
if (!op->IsUnallocated()) return;
- int vreg = UnallocatedOperand::cast(op)->virtual_register();
+ UnallocatedOperand* unalloc = UnallocatedOperand::cast(op);
+ int vreg = unalloc->virtual_register();
int rename = GetRename(vreg);
if (rename != vreg) {
- UnallocatedOperand::cast(op)->set_virtual_register(rename);
+ *unalloc = UnallocatedOperand(*unalloc, rename);
}
}
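Aside: TryRename above now rebuilds the UnallocatedOperand from the old one plus the renamed register instead of mutating the virtual-register field in place. A tiny illustration of that pattern with a made-up operand type.

// Made-up miniature of an unallocated operand: renaming constructs a fresh
// operand that copies every policy bit and swaps only the virtual register.
struct MiniUnallocatedOperand {
  int policy_bits;
  int virtual_register;
  MiniUnallocatedOperand(int policy, int vreg)
      : policy_bits(policy), virtual_register(vreg) {}
  MiniUnallocatedOperand(const MiniUnallocatedOperand& other, int new_vreg)
      : policy_bits(other.policy_bits), virtual_register(new_vreg) {}
};

void TryRename(MiniUnallocatedOperand* op, int renamed_vreg) {
  if (renamed_vreg != op->virtual_register) {
    *op = MiniUnallocatedOperand(*op, renamed_vreg);  // rebuild, do not poke
  }
}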
@@ -471,7 +472,6 @@ InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
}
}
UNREACHABLE();
- return InstructionOperand();
}
} // namespace
@@ -526,7 +526,6 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
}
case IrOpcode::kObjectState: {
UNREACHABLE();
- return 0;
}
case IrOpcode::kTypedObjectState: {
size_t id = deduplicator->GetObjectId(input);
@@ -598,8 +597,7 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
DCHECK_EQ(values_descriptor->size(), 0u);
- values_descriptor->ReserveSize(
- descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+ values_descriptor->ReserveSize(descriptor->GetSize());
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, function,
@@ -767,10 +765,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
while (buffer->frame_state_descriptor != nullptr &&
- (buffer->frame_state_descriptor->type() ==
- FrameStateType::kArgumentsAdaptor ||
- buffer->frame_state_descriptor->type() ==
- FrameStateType::kTailCallerFunction)) {
+ buffer->frame_state_descriptor->type() ==
+ FrameStateType::kArgumentsAdaptor) {
frame_state = NodeProperties::GetFrameStateInput(frame_state);
buffer->frame_state_descriptor =
buffer->frame_state_descriptor->outer_state();
@@ -982,8 +978,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
DCHECK_LE(sw.min_value, sw.max_value);
// Note that {value_range} can be 0 if {min_value} is -2^31 and
- // {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
+ // {max_value} is 2^31-1, so don't assume that it's non-zero below.
sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
bit_cast<uint32_t>(sw.min_value);
return VisitSwitch(input, sw);
@@ -1525,13 +1520,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
case IrOpcode::kF32x4Eq:
- return MarkAsSimd1x4(node), VisitF32x4Eq(node);
+ return MarkAsSimd128(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
- return MarkAsSimd1x4(node), VisitF32x4Ne(node);
+ return MarkAsSimd128(node), VisitF32x4Ne(node);
case IrOpcode::kF32x4Lt:
- return MarkAsSimd1x4(node), VisitF32x4Lt(node);
+ return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
- return MarkAsSimd1x4(node), VisitF32x4Le(node);
+ return MarkAsSimd128(node), VisitF32x4Le(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -1563,13 +1558,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI32x4MaxS:
return MarkAsSimd128(node), VisitI32x4MaxS(node);
case IrOpcode::kI32x4Eq:
- return MarkAsSimd1x4(node), VisitI32x4Eq(node);
+ return MarkAsSimd128(node), VisitI32x4Eq(node);
case IrOpcode::kI32x4Ne:
- return MarkAsSimd1x4(node), VisitI32x4Ne(node);
- case IrOpcode::kI32x4LtS:
- return MarkAsSimd1x4(node), VisitI32x4LtS(node);
- case IrOpcode::kI32x4LeS:
- return MarkAsSimd1x4(node), VisitI32x4LeS(node);
+ return MarkAsSimd128(node), VisitI32x4Ne(node);
+ case IrOpcode::kI32x4GtS:
+ return MarkAsSimd128(node), VisitI32x4GtS(node);
+ case IrOpcode::kI32x4GeS:
+ return MarkAsSimd128(node), VisitI32x4GeS(node);
case IrOpcode::kI32x4UConvertF32x4:
return MarkAsSimd128(node), VisitI32x4UConvertF32x4(node);
case IrOpcode::kI32x4UConvertI16x8Low:
@@ -1582,10 +1577,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4MinU(node);
case IrOpcode::kI32x4MaxU:
return MarkAsSimd128(node), VisitI32x4MaxU(node);
- case IrOpcode::kI32x4LtU:
- return MarkAsSimd1x4(node), VisitI32x4LtU(node);
- case IrOpcode::kI32x4LeU:
- return MarkAsSimd1x4(node), VisitI32x4LeU(node);
+ case IrOpcode::kI32x4GtU:
+ return MarkAsSimd128(node), VisitI32x4GtU(node);
+ case IrOpcode::kI32x4GeU:
+ return MarkAsSimd128(node), VisitI32x4GeU(node);
case IrOpcode::kI16x8Splat:
return MarkAsSimd128(node), VisitI16x8Splat(node);
case IrOpcode::kI16x8ExtractLane:
@@ -1621,13 +1616,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI16x8MaxS:
return MarkAsSimd128(node), VisitI16x8MaxS(node);
case IrOpcode::kI16x8Eq:
- return MarkAsSimd1x8(node), VisitI16x8Eq(node);
+ return MarkAsSimd128(node), VisitI16x8Eq(node);
case IrOpcode::kI16x8Ne:
- return MarkAsSimd1x8(node), VisitI16x8Ne(node);
- case IrOpcode::kI16x8LtS:
- return MarkAsSimd1x8(node), VisitI16x8LtS(node);
- case IrOpcode::kI16x8LeS:
- return MarkAsSimd1x8(node), VisitI16x8LeS(node);
+ return MarkAsSimd128(node), VisitI16x8Ne(node);
+ case IrOpcode::kI16x8GtS:
+ return MarkAsSimd128(node), VisitI16x8GtS(node);
+ case IrOpcode::kI16x8GeS:
+ return MarkAsSimd128(node), VisitI16x8GeS(node);
case IrOpcode::kI16x8UConvertI8x16Low:
return MarkAsSimd128(node), VisitI16x8UConvertI8x16Low(node);
case IrOpcode::kI16x8UConvertI8x16High:
@@ -1644,10 +1639,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8MinU(node);
case IrOpcode::kI16x8MaxU:
return MarkAsSimd128(node), VisitI16x8MaxU(node);
- case IrOpcode::kI16x8LtU:
- return MarkAsSimd1x8(node), VisitI16x8LtU(node);
- case IrOpcode::kI16x8LeU:
- return MarkAsSimd1x8(node), VisitI16x8LeU(node);
+ case IrOpcode::kI16x8GtU:
+ return MarkAsSimd128(node), VisitI16x8GtU(node);
+ case IrOpcode::kI16x8GeU:
+ return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kI8x16Splat:
return MarkAsSimd128(node), VisitI8x16Splat(node);
case IrOpcode::kI8x16ExtractLane:
@@ -1677,13 +1672,13 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kI8x16MaxS:
return MarkAsSimd128(node), VisitI8x16MaxS(node);
case IrOpcode::kI8x16Eq:
- return MarkAsSimd1x16(node), VisitI8x16Eq(node);
+ return MarkAsSimd128(node), VisitI8x16Eq(node);
case IrOpcode::kI8x16Ne:
- return MarkAsSimd1x16(node), VisitI8x16Ne(node);
- case IrOpcode::kI8x16LtS:
- return MarkAsSimd1x16(node), VisitI8x16LtS(node);
- case IrOpcode::kI8x16LeS:
- return MarkAsSimd1x16(node), VisitI8x16LeS(node);
+ return MarkAsSimd128(node), VisitI8x16Ne(node);
+ case IrOpcode::kI8x16GtS:
+ return MarkAsSimd128(node), VisitI8x16GtS(node);
+ case IrOpcode::kI8x16GeS:
+ return MarkAsSimd128(node), VisitI8x16GeS(node);
case IrOpcode::kI8x16ShrU:
return MarkAsSimd128(node), VisitI8x16ShrU(node);
case IrOpcode::kI8x16UConvertI16x8:
@@ -1696,10 +1691,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI8x16MinU(node);
case IrOpcode::kI8x16MaxU:
return MarkAsSimd128(node), VisitI8x16MaxU(node);
- case IrOpcode::kI8x16LtU:
- return MarkAsSimd1x16(node), VisitI8x16LtU(node);
- case IrOpcode::kI8x16LeU:
- return MarkAsSimd1x16(node), VisitI16x8LeU(node);
+ case IrOpcode::kI8x16GtU:
+ return MarkAsSimd128(node), VisitI8x16GtU(node);
+ case IrOpcode::kI8x16GeU:
+ return MarkAsSimd128(node), VisitI16x8GeU(node);
case IrOpcode::kS128Zero:
return MarkAsSimd128(node), VisitS128Zero(node);
case IrOpcode::kS128And:
@@ -1710,56 +1705,18 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
- case IrOpcode::kS32x4Shuffle:
- return MarkAsSimd128(node), VisitS32x4Shuffle(node);
- case IrOpcode::kS32x4Select:
- return MarkAsSimd128(node), VisitS32x4Select(node);
- case IrOpcode::kS16x8Shuffle:
- return MarkAsSimd128(node), VisitS16x8Shuffle(node);
- case IrOpcode::kS16x8Select:
- return MarkAsSimd128(node), VisitS16x8Select(node);
+ case IrOpcode::kS128Select:
+ return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
- case IrOpcode::kS8x16Select:
- return MarkAsSimd128(node), VisitS8x16Select(node);
- case IrOpcode::kS1x4Zero:
- return MarkAsSimd1x4(node), VisitS1x4Zero(node);
- case IrOpcode::kS1x4And:
- return MarkAsSimd1x4(node), VisitS1x4And(node);
- case IrOpcode::kS1x4Or:
- return MarkAsSimd1x4(node), VisitS1x4Or(node);
- case IrOpcode::kS1x4Xor:
- return MarkAsSimd1x4(node), VisitS1x4Xor(node);
- case IrOpcode::kS1x4Not:
- return MarkAsSimd1x4(node), VisitS1x4Not(node);
case IrOpcode::kS1x4AnyTrue:
return MarkAsWord32(node), VisitS1x4AnyTrue(node);
case IrOpcode::kS1x4AllTrue:
return MarkAsWord32(node), VisitS1x4AllTrue(node);
- case IrOpcode::kS1x8Zero:
- return MarkAsSimd1x8(node), VisitS1x8Zero(node);
- case IrOpcode::kS1x8And:
- return MarkAsSimd1x8(node), VisitS1x8And(node);
- case IrOpcode::kS1x8Or:
- return MarkAsSimd1x8(node), VisitS1x8Or(node);
- case IrOpcode::kS1x8Xor:
- return MarkAsSimd1x8(node), VisitS1x8Xor(node);
- case IrOpcode::kS1x8Not:
- return MarkAsSimd1x8(node), VisitS1x8Not(node);
case IrOpcode::kS1x8AnyTrue:
return MarkAsWord32(node), VisitS1x8AnyTrue(node);
case IrOpcode::kS1x8AllTrue:
return MarkAsWord32(node), VisitS1x8AllTrue(node);
- case IrOpcode::kS1x16Zero:
- return MarkAsSimd1x16(node), VisitS1x16Zero(node);
- case IrOpcode::kS1x16And:
- return MarkAsSimd1x16(node), VisitS1x16And(node);
- case IrOpcode::kS1x16Or:
- return MarkAsSimd1x16(node), VisitS1x16Or(node);
- case IrOpcode::kS1x16Xor:
- return MarkAsSimd1x16(node), VisitS1x16Xor(node);
- case IrOpcode::kS1x16Not:
- return MarkAsSimd1x16(node), VisitS1x16Not(node);
case IrOpcode::kS1x16AnyTrue:
return MarkAsWord32(node), VisitS1x16AnyTrue(node);
case IrOpcode::kS1x16AllTrue:
@@ -1874,6 +1831,7 @@ void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.value_range;
+ DCHECK_LE(sw.value_range, std::numeric_limits<size_t>::max() - 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = index_operand;
InstructionOperand default_operand = g.Label(sw.default_branch);
@@ -1893,6 +1851,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
InstructionOperand& value_operand) {
OperandGenerator g(this);
size_t input_count = 2 + sw.case_count * 2;
+ DCHECK_LE(sw.case_count, (std::numeric_limits<size_t>::max() - 2) / 2);
auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
inputs[0] = value_operand;
inputs[1] = g.Label(sw.default_branch);
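Aside: the DCHECK_LE guards above bound the value range and case count before the input array size is computed from them. A small standalone model of those checks; names are illustrative.

#include <cassert>
#include <cstddef>
#include <limits>

// The input array for a table/lookup switch is sized from user-controlled
// counts, so the size arithmetic is bounds-checked first, mirroring the
// DCHECK_LE guards added above.
size_t TableSwitchInputCount(size_t value_range) {
  assert(value_range <= std::numeric_limits<size_t>::max() - 2);
  return 2 + value_range;
}

size_t LookupSwitchInputCount(size_t case_count) {
  assert(case_count <= (std::numeric_limits<size_t>::max() - 2) / 2);
  return 2 + case_count * 2;
}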
@@ -2084,7 +2043,8 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2108,13 +2068,9 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
}
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2132,10 +2088,11 @@ void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-#endif // V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
@@ -2145,11 +2102,12 @@ void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 &&
- // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2169,14 +2127,17 @@ void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2184,9 +2145,11 @@ void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2214,28 +2177,36 @@ void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
@@ -2253,15 +2224,17 @@ void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
@@ -2283,14 +2256,13 @@ void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2302,49 +2274,58 @@ void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LeU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
@@ -2358,23 +2339,31 @@ void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16LeS(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
+ !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2386,15 +2375,11 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI8x16LtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16LeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
@@ -2402,87 +2387,34 @@ void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x4Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Zero(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Zero(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitS32x4Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS16x8Shuffle(Node* node) { UNIMPLEMENTED(); }
-
-#endif // !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
+ // && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
-#endif // !V8_TARGET_ARCH_ARM
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x4Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x4AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x8And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x8Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x8AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x8AllTrue(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS1x16And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS1x16Not(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS1x16AnyTrue(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x16AllTrue(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 26cc85a81f..512b6d1775 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -263,27 +263,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void MarkAsSimd128(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
}
- void MarkAsSimd1x4(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
- } else {
- MarkAsSimd128(node);
- }
- }
- void MarkAsSimd1x8(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
- } else {
- MarkAsSimd128(node);
- }
- }
- void MarkAsSimd1x16(Node* node) {
- if (kSimdMaskRegisters) {
- MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
- } else {
- MarkAsSimd128(node);
- }
- }
void MarkAsReference(Node* node) {
MarkAsRepresentation(MachineRepresentation::kTagged, node);
}
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 1067d2030a..8096c5b048 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -64,7 +64,6 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
return condition;
}
UNREACHABLE();
- return condition;
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
@@ -210,15 +209,6 @@ std::ostream& operator<<(std::ostream& os,
case MachineRepresentation::kSimd128:
os << "|s128";
break;
- case MachineRepresentation::kSimd1x4:
- os << "|s1x4";
- break;
- case MachineRepresentation::kSimd1x8:
- os << "|s1x8";
- break;
- case MachineRepresentation::kSimd1x16:
- os << "|s1x16";
- break;
case MachineRepresentation::kTaggedSigned:
os << "|ts";
break;
@@ -235,7 +225,6 @@ std::ostream& operator<<(std::ostream& os,
return os << "(x)";
}
UNREACHABLE();
- return os;
}
void MoveOperands::Print(const RegisterConfiguration* config) const {
@@ -415,7 +404,6 @@ std::ostream& operator<<(std::ostream& os, const ArchOpcode& ao) {
#undef CASE
}
UNREACHABLE();
- return os;
}
@@ -430,7 +418,6 @@ std::ostream& operator<<(std::ostream& os, const AddressingMode& am) {
#undef CASE
}
UNREACHABLE();
- return os;
}
@@ -448,7 +435,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os << "trap";
}
UNREACHABLE();
- return os;
}
@@ -504,7 +490,6 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
return os << "negative";
}
UNREACHABLE();
- return os;
}
@@ -576,6 +561,12 @@ Handle<HeapObject> Constant::ToHeapObject() const {
return value;
}
+Handle<Code> Constant::ToCode() const {
+ DCHECK_EQ(kHeapObject, type());
+ Handle<Code> value(bit_cast<Code**>(static_cast<intptr_t>(value_)));
+ return value;
+}
+
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
@@ -585,7 +576,7 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
case Constant::kFloat32:
return os << constant.ToFloat32() << "f";
case Constant::kFloat64:
- return os << constant.ToFloat64();
+ return os << constant.ToFloat64().value();
case Constant::kExternalReference:
return os << static_cast<const void*>(
constant.ToExternalReference().address());
@@ -595,7 +586,6 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << "RPO" << constant.ToRpoNumber().ToInt();
}
UNREACHABLE();
- return os;
}
@@ -896,21 +886,17 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
return InstructionSequence::DefaultRepresentation();
case MachineRepresentation::kWord32:
case MachineRepresentation::kWord64:
- case MachineRepresentation::kFloat32:
- case MachineRepresentation::kFloat64:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
return rep;
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return MachineRepresentation::kNone;
}
@@ -1033,18 +1019,9 @@ FrameStateDescriptor::FrameStateDescriptor(
shared_info_(shared_info),
outer_state_(outer_state) {}
-
-size_t FrameStateDescriptor::GetSize(OutputFrameStateCombine combine) const {
- size_t size = 1 + parameters_count() + locals_count() + stack_count() +
- (HasContext() ? 1 : 0);
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput:
- size += combine.GetPushCount();
- break;
- case OutputFrameStateCombine::kPokeAt:
- break;
- }
- return size;
+size_t FrameStateDescriptor::GetSize() const {
+ return 1 + parameters_count() + locals_count() + stack_count() +
+ (HasContext() ? 1 : 0);
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 5cb28627de..668a5c0efd 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -15,6 +15,7 @@
#include "src/compiler/frame.h"
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
+#include "src/double.h"
#include "src/globals.h"
#include "src/macro-assembler.h"
#include "src/register-configuration.h"
@@ -34,8 +35,6 @@ class V8_EXPORT_PRIVATE InstructionOperand {
public:
static const int kInvalidVirtualRegister = -1;
- // TODO(dcarney): recover bit. INVALID can be represented as UNALLOCATED with
- // kInvalidVirtualRegister and some DCHECKS.
enum Kind {
INVALID,
UNALLOCATED,
@@ -167,7 +166,7 @@ std::ostream& operator<<(std::ostream& os,
return *static_cast<const OperandType*>(&op); \
}
-class UnallocatedOperand : public InstructionOperand {
+class UnallocatedOperand final : public InstructionOperand {
public:
enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
@@ -183,15 +182,14 @@ class UnallocatedOperand : public InstructionOperand {
// Lifetime of operand inside the instruction.
enum Lifetime {
- // USED_AT_START operand is guaranteed to be live only at
- // instruction start. Register allocator is free to assign the same register
- // to some other operand used inside instruction (i.e. temporary or
- // output).
+ // USED_AT_START operand is guaranteed to be live only at instruction start.
+ // The register allocator is free to assign the same register to some other
+ // operand used inside instruction (i.e. temporary or output).
USED_AT_START,
- // USED_AT_END operand is treated as live until the end of
- // instruction. This means that register allocator will not reuse it's
- // register for any other operand inside instruction.
+ // USED_AT_END operand is treated as live until the end of instruction.
+ // This means that register allocator will not reuse its register for any
+ // other operand inside instruction.
USED_AT_END
};
@@ -233,6 +231,12 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= SecondaryStorageField::encode(slot_id);
}
+ UnallocatedOperand(const UnallocatedOperand& other, int virtual_register) {
+ DCHECK_NE(kInvalidVirtualRegister, virtual_register);
+ value_ = VirtualRegisterField::update(
+ other.value_, static_cast<uint32_t>(virtual_register));
+ }
+
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@@ -275,7 +279,6 @@ class UnallocatedOperand : public InstructionOperand {
// [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
BasicPolicy basic_policy() const {
- DCHECK_EQ(UNALLOCATED, kind());
return BasicPolicyField::decode(value_);
}
@@ -300,16 +303,9 @@ class UnallocatedOperand : public InstructionOperand {
// [virtual_register]: The virtual register ID for this operand.
int32_t virtual_register() const {
- DCHECK_EQ(UNALLOCATED, kind());
return static_cast<int32_t>(VirtualRegisterField::decode(value_));
}
- // TODO(dcarney): remove this.
- void set_virtual_register(int32_t id) {
- DCHECK_EQ(UNALLOCATED, kind());
- value_ = VirtualRegisterField::update(value_, static_cast<uint32_t>(id));
- }
-
// [lifetime]: Only for non-FIXED_SLOT.
bool IsUsedAtStart() const {
DCHECK(basic_policy() == EXTENDED_POLICY);
@@ -484,9 +480,6 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
@@ -498,7 +491,6 @@ class LocationOperand : public InstructionOperand {
return false;
}
UNREACHABLE();
- return false;
}
static LocationOperand* cast(InstructionOperand* op) {
@@ -596,9 +588,8 @@ bool InstructionOperand::IsDoubleRegister() const {
}
bool InstructionOperand::IsSimd128Register() const {
- return IsAnyRegister() &&
- LocationOperand::cast(this)->representation() ==
- MachineRepresentation::kSimd128;
+ return IsAnyRegister() && LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd128;
}
bool InstructionOperand::IsAnyStackSlot() const {
@@ -903,6 +894,10 @@ class V8_EXPORT_PRIVATE Instruction final {
FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
+ bool IsTrap() const {
+ return FlagsModeField::decode(opcode()) == kFlags_trap;
+ }
+
bool IsJump() const { return arch_opcode() == ArchOpcode::kArchJmp; }
bool IsRet() const { return arch_opcode() == ArchOpcode::kArchRet; }
bool IsTailCall() const {
@@ -1080,19 +1075,9 @@ class V8_EXPORT_PRIVATE Constant final {
return bit_cast<uint32_t>(static_cast<int32_t>(value_));
}
- double ToFloat64() const {
- // TODO(ahaas): We should remove this function. If value_ has the bit
- // representation of a signalling NaN, then returning it as float can cause
- // the signalling bit to flip, and value_ is returned as a quiet NaN.
- if (type() == kInt32) return ToInt32();
- DCHECK_EQ(kFloat64, type());
- return bit_cast<double>(value_);
- }
-
- uint64_t ToFloat64AsInt() const {
- if (type() == kInt32) return ToInt32();
+ Double ToFloat64() const {
DCHECK_EQ(kFloat64, type());
- return bit_cast<uint64_t>(value_);
+ return Double(bit_cast<uint64_t>(value_));
}
ExternalReference ToExternalReference() const {
@@ -1106,6 +1091,7 @@ class V8_EXPORT_PRIVATE Constant final {
}
Handle<HeapObject> ToHeapObject() const;
+ Handle<Code> ToCode() const;
private:
Type type_;
@@ -1302,11 +1288,11 @@ class FrameStateDescriptor : public ZoneObject {
MaybeHandle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateDescriptor* outer_state() const { return outer_state_; }
bool HasContext() const {
- return FrameStateFunctionInfo::IsJSFunctionType(type_);
+ return FrameStateFunctionInfo::IsJSFunctionType(type_) ||
+ type_ == FrameStateType::kBuiltinContinuation;
}
- size_t GetSize(OutputFrameStateCombine combine =
- OutputFrameStateCombine::Ignore()) const;
+ size_t GetSize() const;
size_t GetTotalSize() const;
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
@@ -1599,7 +1585,6 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
}
UNREACHABLE();
- return Constant(static_cast<int32_t>(0));
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 82c91cc0eb..19db874ca6 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/zone.h"
@@ -289,15 +290,15 @@ void Int64Lowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kCall: {
- // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
(descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Int64())) {
// We have to adjust the call descriptor.
- const Operator* op = common()->Call(
- wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), descriptor));
+ const Operator* op =
+ common()->Call(GetI32WasmCallDescriptor(zone(), descriptor));
NodeProperties::ChangeOp(node, op);
}
if (descriptor->ReturnCount() == 1 &&
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 9ca0c63eb9..0955ff5ec9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -165,7 +165,7 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
// If the receiver map has packed elements, no need to check the prototype.
// This requires a MapCheck where this is used.
- if (!IsFastHoleyElementsKind(receiver_map->elements_kind())) return true;
+ if (!IsHoleyElementsKind(receiver_map->elements_kind())) return true;
Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
isolate);
@@ -254,7 +254,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
// on the prototype chain.
map_index += static_cast<int>(receiver_map->elements_kind());
object_map = jsgraph()->Constant(receiver_map);
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
Handle<JSObject> initial_array_prototype(
native_context()->initial_array_prototype(), isolate());
dependencies()->AssumePrototypeMapsStable(receiver_map,
@@ -344,7 +344,7 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
iterator_map->instance_type());
- if (IsFastHoleyElementsKind(elements_kind)) {
+ if (IsHoleyElementsKind(elements_kind)) {
if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
return NoChange();
} else {
@@ -416,12 +416,12 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
elements, index, etrue1, if_true1);
// Convert hole to undefined if needed.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
value = graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(),
value);
- } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
- // TODO(bmeurer): avoid deopt if not all uses of value are truncated.
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
+ // TODO(6587): avoid deopt if not all uses of value are truncated.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kAllowReturnHole;
value = etrue1 = graph()->NewNode(
simplified()->CheckFloat64Hole(mode), value, etrue1, if_true1);
@@ -847,7 +847,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
+ receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -882,7 +882,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
receiver, efalse, if_false);
// Ensure that we aren't popping from a copy-on-write backing store.
- if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
elements = efalse =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, efalse, if_false);
@@ -919,7 +919,7 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
value =
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
@@ -976,10 +976,10 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// currently don't have a proper way to deal with this; the proper solution
// here is to learn on deopt, i.e. disable Array.prototype.push inlining
// for this function.
- if (IsFastSmiElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiElementsKind(receiver_map->elements_kind())) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
value = effect =
graph()->NewNode(simplified()->CheckNumber(), value, effect, control);
// Make sure we do not store signaling NaNs into double arrays.
@@ -1002,7 +1002,7 @@ Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
// don't necessarily learn from it. See the comment on the value type check
// above.
GrowFastElementsFlags flags = GrowFastElementsFlag::kArrayObject;
- if (IsFastDoubleElementsKind(receiver_map->elements_kind())) {
+ if (IsDoubleElementsKind(receiver_map->elements_kind())) {
flags |= GrowFastElementsFlag::kDoubleElements;
}
elements = effect =
@@ -1039,7 +1039,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
Handle<Map> receiver_map;
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
+ receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -1087,7 +1087,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
// Ensure that we aren't shifting a copy-on-write backing store.
- if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
elements = etrue1 =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, etrue1, if_true1);
@@ -1187,7 +1187,7 @@ Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
- if (IsFastHoleyElementsKind(receiver_map->elements_kind())) {
+ if (IsHoleyElementsKind(receiver_map->elements_kind())) {
value =
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
@@ -1202,33 +1202,365 @@ namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
InstanceType instance_type) {
- for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckMaps &&
- NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
- ZoneHandleSet<Map> const& maps =
- CheckMapsParametersOf(dominator->op()).maps();
- // Check if all maps have the given {instance_type}.
- for (size_t i = 0; i < maps.size(); ++i) {
- if (maps[i]->instance_type() != instance_type) return false;
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ switch (result) {
+ case NodeProperties::kUnreliableReceiverMaps:
+ case NodeProperties::kReliableReceiverMaps:
+ DCHECK_NE(0, receiver_maps.size());
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != instance_type) return false;
}
return true;
- }
- // The instance type doesn't change for JSReceiver values, so we
- // don't need to pay attention to potentially side-effecting nodes
- // here. Strings and internal structures like FixedArray and
- // FixedDoubleArray are weird here, but we don't use this function then.
- DCHECK_LE(FIRST_JS_RECEIVER_TYPE, instance_type);
- DCHECK_EQ(1, dominator->op()->EffectOutputCount());
- if (dominator->op()->EffectInputCount() != 1) {
- // Didn't find any appropriate CheckMaps node.
+
+ case NodeProperties::kNoReceiverMaps:
return false;
- }
- dominator = NodeProperties::GetEffectInput(dominator);
}
+ UNREACHABLE();
}
} // namespace
+Reduction JSBuiltinReducer::ReduceCollectionIterator(
+ Node* node, InstanceType collection_instance_type,
+ int collection_iterator_map_index) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ // Figure out the proper collection iterator map.
+ Handle<Map> collection_iterator_map(
+ Map::cast(native_context()->get(collection_iterator_map_index)),
+ isolate());
+
+ // Load the OrderedHashTable from the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ receiver, effect, control);
+
+ // Create the JSCollectionIterator result.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherObject(), NOT_TENURED),
+ jsgraph()->Constant(JSCollectionIterator::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->Constant(collection_iterator_map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
+ value, table, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ value, jsgraph()->ZeroConstant(), effect, control);
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceCollectionSize(
+ Node* node, InstanceType collection_instance_type) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (HasInstanceTypeWitness(receiver, effect, collection_instance_type)) {
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+ receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
+ Node* node, int entry_size,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // A word of warning to begin with: This whole method might look a bit
+ // strange at times, but that's mostly because it was carefully handcrafted
+ // to allow for full escape analysis and scalar replacement of both the
+ // collection iterator object and the iterator results, including the
+ // key-value arrays in case of Set/Map entry iteration.
+ //
+ // TODO(turbofan): Currently the escape analysis (and the store-load
+ // forwarding) is unable to eliminate the allocations for the key-value
+ // arrays in case of Set/Map entry iteration, and we should investigate
+ // how to update the escape analysis / arrange the graph in a way that
+ // this becomes possible.
+
+ // Infer the {receiver} instance type.
+ InstanceType receiver_instance_type;
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+ receiver_instance_type = receiver_maps[0]->instance_type();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ if (receiver_maps[i]->instance_type() != receiver_instance_type) {
+ return NoChange();
+ }
+ }
+ if (receiver_instance_type < collection_iterator_instance_type_first ||
+ receiver_instance_type > collection_iterator_instance_type_last) {
+ return NoChange();
+ }
+
+ // Transition the JSCollectionIterator {receiver} if necessary
+  // (i.e. there were certain mutations while we were iterating).
+ {
+ Node* done_loop;
+ Node* done_eloop;
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+
+    // Check if we reached the final table of the {receiver}.
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+ Node* next_table = effect =
+ graph()->NewNode(simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNextTable()),
+ table, effect, control);
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), next_table);
+ control =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Abort the {loop} when we reach the final table.
+ done_loop = graph()->NewNode(common()->IfTrue(), control);
+ done_eloop = effect;
+
+ // Migrate to the {next_table} otherwise.
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Self-heal the {receiver}s index.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kOrderedHashTableHealIndex);
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kEliminatable);
+ index = effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
+ index, jsgraph()->NoContextConstant(), effect);
+
+ // Update the {index} and {table} on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, next_table, effect, control);
+
+ // Tie the knot.
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+
+ control = done_loop;
+ effect = done_eloop;
+ }
+
+ // Get current index and table from the JSCollectionIterator {receiver}.
+ Node* index = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, effect, control);
+ Node* table = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver, effect, control);
+
+ // Create the {JSIteratorResult} first to ensure that we always have
+ // a dominating Allocate node for the allocation folding phase.
+ Node* iterator_result = effect = graph()->NewNode(
+ javascript()->CreateIterResultObject(), jsgraph()->UndefinedConstant(),
+ jsgraph()->TrueConstant(), context, effect);
+
+ // Look for the next non-holey key, starting from {index} in the {table}.
+ Node* controls[2];
+ Node* effects[3];
+ {
+ // Compute the currently used capacity.
+ Node* number_of_buckets = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfBuckets()),
+ table, effect, control);
+ Node* number_of_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfElements()),
+ table, effect, control);
+ Node* number_of_deleted_elements = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForOrderedHashTableBaseNumberOfDeletedElements()),
+ table, effect, control);
+ Node* used_capacity =
+ graph()->NewNode(simplified()->NumberAdd(), number_of_elements,
+ number_of_deleted_elements);
+
+ // Skip holes and update the {index}.
+ Node* loop = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* iloop = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), index, index, loop);
+ NodeProperties::SetType(iloop, type_cache_.kFixedArrayLengthType);
+ {
+ Node* check0 = graph()->NewNode(simplified()->NumberLessThan(), iloop,
+ used_capacity);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, loop);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = eloop;
+ {
+ // Mark the {receiver} as exhausted.
+ efalse0 = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorTable()),
+ receiver,
+ jsgraph()->HeapConstant(factory()->empty_ordered_hash_table()),
+ efalse0, if_false0);
+
+ controls[0] = if_false0;
+ effects[0] = efalse0;
+ }
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = eloop;
+ {
+ // Load the key of the entry.
+ Node* entry_start_position = graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(
+ simplified()->NumberAdd(),
+ graph()->NewNode(simplified()->NumberMultiply(), iloop,
+ jsgraph()->Constant(entry_size)),
+ number_of_buckets),
+ jsgraph()->Constant(OrderedHashTableBase::kHashTableStartIndex));
+ Node* entry_key = etrue0 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ table, entry_start_position, etrue0, if_true0);
+
+ // Advance the index.
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), iloop,
+ jsgraph()->OneConstant());
+
+ Node* check1 =
+ graph()->NewNode(simplified()->ReferenceEqual(), entry_key,
+ jsgraph()->TheHoleConstant());
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_true0);
+
+ {
+ // Abort loop with resulting value.
+ Node* control = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* effect = etrue0;
+ Node* value = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), entry_key, control);
+ Node* done = jsgraph()->FalseConstant();
+
+ // Advance the index on the {receiver}.
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSCollectionIteratorIndex()),
+ receiver, index, effect, control);
+
+ // The actual {value} depends on the {receiver} iteration type.
+ switch (receiver_instance_type) {
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ break;
+
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(), value,
+ value, context, effect);
+ break;
+
+ case JS_MAP_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ break;
+
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ value = effect = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement()),
+ table,
+ graph()->NewNode(
+ simplified()->NumberAdd(), entry_start_position,
+ jsgraph()->Constant(OrderedHashMap::kValueOffset)),
+ effect, control);
+ value = effect =
+ graph()->NewNode(javascript()->CreateKeyValueArray(),
+ entry_key, value, context, effect);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ // Store final {value} and {done} into the {iterator_result}.
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultValue()),
+ iterator_result, value, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSIteratorResultDone()),
+ iterator_result, done, effect, control);
+
+ controls[1] = control;
+ effects[1] = effect;
+ }
+
+ // Continue with next loop index.
+ loop->ReplaceInput(1, graph()->NewNode(common()->IfTrue(), branch1));
+ eloop->ReplaceInput(1, etrue0);
+ iloop->ReplaceInput(1, index);
+ }
+ }
+
+ control = effects[2] = graph()->NewNode(common()->Merge(2), 2, controls);
+ effect = graph()->NewNode(common()->EffectPhi(2), 3, effects);
+ }
+
+ // Yield the final {iterator_result}.
+ ReplaceWithValue(node, iterator_result, effect, control);
+ return Replace(iterator_result);
+}
+
// ES6 section 20.3.3.1 Date.now ( )
Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
NodeProperties::RemoveValueInputs(node);
@@ -1252,6 +1584,114 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
return NoChange();
}
+// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
+Reduction JSBuiltinReducer::ReduceFunctionBind(Node* node) {
+ // Value inputs to the {node} are as follows:
+ //
+  //  - target, which is the Function.prototype.bind JSFunction
+ // - receiver, which is the [[BoundTargetFunction]]
+ // - bound_this (optional), which is the [[BoundThis]]
+  //  - and all the remaining value inputs are the [[BoundArguments]]
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Type* receiver_type = NodeProperties::GetType(receiver);
+ Node* bound_this = (node->op()->ValueInputCount() < 3)
+ ? jsgraph()->UndefinedConstant()
+ : NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (receiver_type->IsHeapConstant() &&
+ receiver_type->AsHeapConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> target_function =
+ Handle<JSFunction>::cast(receiver_type->AsHeapConstant()->Value());
+
+ // Check that the "length" property on the {target_function} is the
+ // default JSFunction accessor.
+ LookupIterator length_lookup(target_function, factory()->length_string(),
+ target_function, LookupIterator::OWN);
+ if (length_lookup.state() != LookupIterator::ACCESSOR ||
+ !length_lookup.GetAccessors()->IsAccessorInfo()) {
+ return NoChange();
+ }
+
+ // Check that the "name" property on the {target_function} is the
+ // default JSFunction accessor.
+ LookupIterator name_lookup(target_function, factory()->name_string(),
+ target_function, LookupIterator::OWN);
+ if (name_lookup.state() != LookupIterator::ACCESSOR ||
+ !name_lookup.GetAccessors()->IsAccessorInfo()) {
+ return NoChange();
+ }
+
+ // Determine the prototype of the {target_function}.
+ Handle<Object> prototype(target_function->map()->prototype(), isolate());
+
+  // Set up the map for the JSBoundFunction instance.
+ Handle<Map> map = target_function->IsConstructor()
+ ? isolate()->bound_function_with_constructor_map()
+ : isolate()->bound_function_without_constructor_map();
+ if (map->prototype() != *prototype) {
+ map = Map::TransitionToPrototype(map, prototype);
+ }
+ DCHECK_EQ(target_function->IsConstructor(), map->is_constructor());
+
+ // Create the [[BoundArguments]] for the result.
+ Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
+ if (node->op()->ValueInputCount() > 3) {
+ int const length = node->op()->ValueInputCount() - 3;
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ bound_arguments = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
+ jsgraph()->Constant(FixedArray::SizeFor(length)), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), bound_arguments,
+ jsgraph()->FixedArrayMapConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
+ bound_arguments, jsgraph()->Constant(length), effect, control);
+ for (int i = 0; i < length; ++i) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)),
+ bound_arguments, NodeProperties::GetValueInput(node, 3 + i), effect,
+ control);
+ }
+ bound_arguments = effect =
+ graph()->NewNode(common()->FinishRegion(), bound_arguments, effect);
+ }
+
+ // Create the JSBoundFunction result.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect = graph()->NewNode(
+ simplified()->Allocate(Type::BoundFunction(), NOT_TENURED),
+ jsgraph()->Constant(JSBoundFunction::kSize), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, jsgraph()->Constant(map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSBoundFunctionBoundTargetFunction()),
+ value, receiver, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSBoundFunctionBoundThis()),
+ value, bound_this, effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForJSBoundFunctionBoundArguments()),
+ value, bound_arguments, effect, control);
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ return NoChange();
+}
+
// ES6 section 18.2.2 isFinite ( number )
Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1280,6 +1720,86 @@ Reduction JSBuiltinReducer::ReduceGlobalIsNaN(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceMapGet(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+
+ Node* storage = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* index = effect = graph()->NewNode(
+ simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ jsgraph()->MinusOneConstant());
+
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ // Key not found.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ // Key found.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), storage,
+ index, efalse, if_false);
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
+Reduction JSBuiltinReducer::ReduceMapHas(Node* node) {
+ // We only optimize if we have target, receiver and key parameters.
+ if (node->op()->ValueInputCount() != 3) return NoChange();
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* key = NodeProperties::GetValueInput(node, 2);
+
+ if (!HasInstanceTypeWitness(receiver, effect, JS_MAP_TYPE)) return NoChange();
+
+ Node* storage = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSCollectionTable()), receiver,
+ effect, control);
+
+ Node* index = effect = graph()->NewNode(
+ simplified()->LookupHashStorageIndex(), storage, key, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), index,
+ jsgraph()->MinusOneConstant());
+ Node* branch = graph()->NewNode(common()->Branch(), check, control);
+
+ // Key not found.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->FalseConstant();
+
+ // Key found.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
// ES6 section 20.2.2.1 Math.abs ( x )
Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
JSCallReduction r(node);
@@ -1788,7 +2308,7 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
- DCHECK(base::bits::IsPowerOfTwo32(capacity));
+ DCHECK(base::bits::IsPowerOfTwo(capacity));
int length = NameDictionary::EntryToIndex(capacity);
int size = NameDictionary::SizeFor(length);
@@ -1821,14 +2341,11 @@ Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
// Initialize Dictionary fields.
Node* undefined = jsgraph()->UndefinedConstant();
effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
- value, undefined, effect, control);
- effect = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForDictionaryNextEnumerationIndex()),
value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
control);
- // Initialize hte Properties fields.
+ // Initialize the Properties fields.
for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
index < length; index++) {
effect = graph()->NewNode(
@@ -1899,7 +2416,10 @@ Node* GetStringWitness(Node* node) {
// it's {receiver}, and if so use that renaming as {receiver} for
// the lowering below.
for (Node* dominator = effect;;) {
- if (dominator->opcode() == IrOpcode::kCheckString &&
+ if ((dominator->opcode() == IrOpcode::kCheckString ||
+ dominator->opcode() == IrOpcode::kCheckInternalizedString ||
+ dominator->opcode() == IrOpcode::kCheckSeqString ||
+ dominator->opcode() == IrOpcode::kCheckNonEmptyString) &&
NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
return dominator;
}
@@ -2260,6 +2780,30 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
return NoChange();
}
+Reduction JSBuiltinReducer::ReduceStringToLowerCaseIntl(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToLowerCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
+Reduction JSBuiltinReducer::ReduceStringToUpperCaseIntl(Node* node) {
+ if (Node* receiver = GetStringWitness(node)) {
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, receiver);
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->StringToUpperCaseIntl());
+ NodeProperties::SetType(node, Type::String());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* node, InstanceType instance_type, FieldAccess const& access) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -2324,12 +2868,35 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceDateNow(node);
case kDateGetTime:
return ReduceDateGetTime(node);
+ case kFunctionBind:
+ return ReduceFunctionBind(node);
case kGlobalIsFinite:
reduction = ReduceGlobalIsFinite(node);
break;
case kGlobalIsNaN:
reduction = ReduceGlobalIsNaN(node);
break;
+ case kMapEntries:
+ return ReduceCollectionIterator(
+ node, JS_MAP_TYPE, Context::MAP_KEY_VALUE_ITERATOR_MAP_INDEX);
+ case kMapGet:
+ reduction = ReduceMapGet(node);
+ break;
+ case kMapHas:
+ reduction = ReduceMapHas(node);
+ break;
+ case kMapKeys:
+ return ReduceCollectionIterator(node, JS_MAP_TYPE,
+ Context::MAP_KEY_ITERATOR_MAP_INDEX);
+ case kMapSize:
+ return ReduceCollectionSize(node, JS_MAP_TYPE);
+ case kMapValues:
+ return ReduceCollectionIterator(node, JS_MAP_TYPE,
+ Context::MAP_VALUE_ITERATOR_MAP_INDEX);
+ case kMapIteratorNext:
+ return ReduceCollectionIteratorNext(node, OrderedHashMap::kEntrySize,
+ FIRST_MAP_ITERATOR_TYPE,
+ LAST_MAP_ITERATOR_TYPE);
case kMathAbs:
reduction = ReduceMathAbs(node);
break;
@@ -2447,6 +3014,18 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kObjectCreate:
reduction = ReduceObjectCreate(node);
break;
+ case kSetEntries:
+ return ReduceCollectionIterator(
+ node, JS_SET_TYPE, Context::SET_KEY_VALUE_ITERATOR_MAP_INDEX);
+ case kSetSize:
+ return ReduceCollectionSize(node, JS_SET_TYPE);
+ case kSetValues:
+ return ReduceCollectionIterator(node, JS_SET_TYPE,
+ Context::SET_VALUE_ITERATOR_MAP_INDEX);
+ case kSetIteratorNext:
+ return ReduceCollectionIteratorNext(node, OrderedHashSet::kEntrySize,
+ FIRST_SET_ITERATOR_TYPE,
+ LAST_SET_ITERATOR_TYPE);
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
@@ -2462,6 +3041,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceStringIterator(node);
case kStringIteratorNext:
return ReduceStringIteratorNext(node);
+ case kStringToLowerCaseIntl:
+ return ReduceStringToLowerCaseIntl(node);
+ case kStringToUpperCaseIntl:
+ return ReduceStringToUpperCaseIntl(node);
case kDataViewByteLength:
return ReduceArrayBufferViewAccessor(
node, JS_DATA_VIEW_TYPE,
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 736ece34e4..db8bd74dd9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Handle<Context> native_context);
~JSBuiltinReducer() final {}
+ const char* reducer_name() const override { return "JSBuiltinReducer"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -59,10 +61,22 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
Reduction ReduceArrayShift(Node* node);
+ Reduction ReduceCollectionIterator(Node* node,
+ InstanceType collection_instance_type,
+ int collection_iterator_map_index);
+ Reduction ReduceCollectionSize(Node* node,
+ InstanceType collection_instance_type);
+ Reduction ReduceCollectionIteratorNext(
+ Node* node, int entry_size,
+ InstanceType collection_iterator_instance_type_first,
+ InstanceType collection_iterator_instance_type_last);
Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
+ Reduction ReduceFunctionBind(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
+ Reduction ReduceMapHas(Node* node);
+ Reduction ReduceMapGet(Node* node);
Reduction ReduceMathAbs(Node* node);
Reduction ReduceMathAcos(Node* node);
Reduction ReduceMathAcosh(Node* node);
@@ -109,6 +123,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
+ Reduction ReduceStringToLowerCaseIntl(Node* node);
+ Reduction ReduceStringToUpperCaseIntl(Node* node);
Reduction ReduceArrayBufferViewAccessor(Node* node,
InstanceType instance_type,
FieldAccess const& access);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 1e1d3a92ab..ca82afad8c 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -7,6 +7,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -23,10 +24,14 @@ Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
return ReduceJSConstruct(node);
+ case IrOpcode::kJSConstructWithArrayLike:
+ return ReduceJSConstructWithArrayLike(node);
case IrOpcode::kJSConstructWithSpread:
return ReduceJSConstructWithSpread(node);
case IrOpcode::kJSCall:
return ReduceJSCall(node);
+ case IrOpcode::kJSCallWithArrayLike:
+ return ReduceJSCallWithArrayLike(node);
case IrOpcode::kJSCallWithSpread:
return ReduceJSCallWithSpread(node);
default:
@@ -35,6 +40,23 @@ Reduction JSCallReducer::Reduce(Node* node) {
return NoChange();
}
+void JSCallReducer::Finalize() {
+ // TODO(turbofan): This is not the best solution; ideally we would be able
+ // to teach the GraphReducer about arbitrary dependencies between different
+ // nodes, even if they don't show up in the use list of the other node.
+ std::set<Node*> const waitlist = std::move(waitlist_);
+ for (Node* node : waitlist) {
+ if (!node->IsDead()) {
+ Reduction const reduction = Reduce(node);
+ if (reduction.Changed()) {
+ Node* replacement = reduction.replacement();
+ if (replacement != node) {
+ Replace(node, replacement);
+ }
+ }
+ }
+ }
+}
// ES6 section 22.1.1 The Array Constructor
Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
@@ -94,18 +116,48 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+namespace {
+
+bool CanBeNullOrUndefined(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateKeyValueArray:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructForwardVarargs:
+ case IrOpcode::kJSConstructWithSpread:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToBoolean:
+ case IrOpcode::kJSToInteger:
+ case IrOpcode::kJSToLength:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToNumber:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
+ return false;
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> value = HeapObjectMatcher(node).Value();
+ Isolate* const isolate = value->GetIsolate();
+ return value->IsNull(isolate) || value->IsUndefined(isolate);
+ }
+ default:
+ return true;
+ }
+}
+
+} // namespace
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* target = NodeProperties::GetValueInput(node, 0);
CallParameters const& p = CallParametersOf(node->op());
- // Tail calls to Function.prototype.apply are not properly supported
- // down the pipeline, so we disable this optimization completely for
- // tail calls (for now).
- if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
- Handle<JSFunction> apply =
- Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
size_t arity = p.arity();
DCHECK_LE(2u, arity);
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
@@ -118,97 +170,101 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// The argArray was not provided, just remove the {target}.
node->RemoveInput(0);
--arity;
- } else if (arity == 4) {
- // Check if argArray is an arguments object, and {node} is the only value
- // user of argArray (except for value uses in frame states).
- Node* arg_array = NodeProperties::GetValueInput(node, 3);
- if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
- for (Edge edge : arg_array->use_edges()) {
- Node* const user = edge.from();
- if (user == node) continue;
- // Ignore uses as frame state's locals or parameters.
- if (user->opcode() == IrOpcode::kStateValues) continue;
- // Ignore uses as frame state's accumulator.
- if (user->opcode() == IrOpcode::kFrameState &&
- user->InputAt(2) == arg_array) {
- continue;
- }
- if (!NodeProperties::IsValueEdge(edge)) continue;
- return NoChange();
- }
- // Check if the arguments can be handled in the fast case (i.e. we don't
- // have aliased sloppy arguments), and compute the {start_index} for
- // rest parameters.
- CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int start_index = 0;
- // Determine the formal parameter count;
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- int formal_parameter_count = shared->internal_formal_parameter_count();
- if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) that are aliased can only be handled
- // here if there's no side-effect between the {node} and the {arg_array}.
- // TODO(turbofan): Further relax this constraint.
- if (formal_parameter_count != 0) {
- Node* effect = NodeProperties::GetEffectInput(node);
- while (effect != arg_array) {
- if (effect->op()->EffectInputCount() != 1 ||
- !(effect->op()->properties() & Operator::kNoWrite)) {
- return NoChange();
- }
- effect = NodeProperties::GetEffectInput(effect);
- }
+ } else {
+ Node* target = NodeProperties::GetValueInput(node, 1);
+ Node* this_argument = NodeProperties::GetValueInput(node, 2);
+ Node* arguments_list = NodeProperties::GetValueInput(node, 3);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // If {arguments_list} cannot be null or undefined, we don't need
+ // to expand this {node} to control-flow.
+ if (!CanBeNullOrUndefined(arguments_list)) {
+ // Massage the value inputs appropriately.
+ node->ReplaceInput(0, target);
+ node->ReplaceInput(1, this_argument);
+ node->ReplaceInput(2, arguments_list);
+ while (arity-- > 3) node->RemoveInput(3);
+
+ // Morph the {node} to a {JSCallWithArrayLike}.
+ NodeProperties::ChangeOp(node,
+ javascript()->CallWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSCallWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ } else {
+ // Check whether {arguments_list} is null.
+ Node* check_null =
+ graph()->NewNode(simplified()->ReferenceEqual(), arguments_list,
+ jsgraph()->NullConstant());
+ control = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_null, control);
+ Node* if_null = graph()->NewNode(common()->IfTrue(), control);
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Check whether {arguments_list} is undefined.
+ Node* check_undefined =
+ graph()->NewNode(simplified()->ReferenceEqual(), arguments_list,
+ jsgraph()->UndefinedConstant());
+ control = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check_undefined, control);
+ Node* if_undefined = graph()->NewNode(common()->IfTrue(), control);
+ control = graph()->NewNode(common()->IfFalse(), control);
+
+ // Lower to {JSCallWithArrayLike} if {arguments_list} is neither null
+ // nor undefined.
+ Node* effect0 = effect;
+ Node* control0 = control;
+ Node* value0 = effect0 = control0 = graph()->NewNode(
+ javascript()->CallWithArrayLike(p.frequency()), target, this_argument,
+ arguments_list, context, frame_state, effect0, control0);
+
+ // Lower to {JSCall} if {arguments_list} is either null or undefined.
+ Node* effect1 = effect;
+ Node* control1 =
+ graph()->NewNode(common()->Merge(2), if_null, if_undefined);
+ Node* value1 = effect1 = control1 =
+ graph()->NewNode(javascript()->Call(2), target, this_argument,
+ context, frame_state, effect1, control1);
+
+ // Rewire potential exception edges.
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ // Create appropriate {IfException} and {IfSuccess} nodes.
+ Node* if_exception0 =
+ graph()->NewNode(common()->IfException(), control0, effect0);
+ control0 = graph()->NewNode(common()->IfSuccess(), control0);
+ Node* if_exception1 =
+ graph()->NewNode(common()->IfException(), control1, effect1);
+ control1 = graph()->NewNode(common()->IfSuccess(), control1);
+
+ // Join the exception edges.
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), if_exception0, if_exception1);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), if_exception0,
+ if_exception1, merge);
+ Node* phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_exception0, if_exception1, merge);
+ ReplaceWithValue(if_exception, phi, ephi, merge);
}
- } else if (type == CreateArgumentsType::kRestParameter) {
- start_index = formal_parameter_count;
- }
- // Check if we are applying to inlined arguments or to the arguments of
- // the outermost function.
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) {
- // Reduce {node} to a JSCallForwardVarargs operation, which just
- // re-pushes the incoming arguments and calls the {target}.
- node->RemoveInput(0); // Function.prototype.apply
- node->RemoveInput(2); // arguments
- NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
- 2, start_index, p.tail_call_mode()));
- return Changed(node);
- }
- // Get to the actual frame state from which to extract the arguments;
- // we can only optimize this in case the {node} was already inlined into
- // some other function (and same for the {arg_array}).
- FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
- if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
- // Need to take the parameters from the arguments adaptor.
- frame_state = outer_state;
- }
- // Remove the argArray input from the {node}.
- node->RemoveInput(static_cast<int>(--arity));
- // Add the actual parameters to the {node}, skipping the receiver,
- // starting from {start_index}.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
- node->InsertInput(graph()->zone(), static_cast<int>(arity),
- parameters->InputAt(i));
- ++arity;
+
+ // Join control paths.
+ control = graph()->NewNode(common()->Merge(2), control0, control1);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), effect0, effect1, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ value0, value1, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- // Drop the {target} from the {node}.
- node->RemoveInput(0);
- --arity;
- } else {
- return NoChange();
}
// Change {node} to the new {JSCall} operator.
NodeProperties::ChangeOp(
node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
- p.tail_call_mode()));
- // Change context of {node} to the Function.prototype.apply context,
- // to ensure any exception is thrown in the correct context.
- NodeProperties::ReplaceContextInput(
- node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -244,8 +300,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
}
NodeProperties::ChangeOp(
node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
- p.tail_call_mode()));
+ javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -288,17 +343,12 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(object, effect, &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
- Handle<Map> candidate_map(
- object_maps[0]->GetPrototypeChainRootMap(isolate()));
+ Handle<Map> candidate_map = object_maps[0];
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
- // We cannot deal with primitives here.
- if (candidate_map->IsPrimitiveMap()) return NoChange();
-
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
- Handle<Map> const object_map(
- object_maps[i]->GetPrototypeChainRootMap(isolate()));
+ Handle<Map> object_map = object_maps[i];
if (object_map->IsSpecialReceiverMap() ||
object_map->has_hidden_prototype() ||
object_map->prototype() != *candidate_prototype) {
@@ -307,6 +357,9 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
// with hidden prototypes at this point.
return NoChange();
}
+ // The above check also excludes maps for primitive values, which is
+ // important because we are not applying [[ToObject]] here as expected.
+ DCHECK(!object_map->IsPrimitiveMap() && object_map->IsJSReceiverMap());
if (result == NodeProperties::kUnreliableReceiverMaps &&
!object_map->is_stable()) {
return NoChange();
@@ -341,6 +394,83 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return ReduceObjectGetPrototype(node, receiver);
}
+// ES #sec-object.prototype.isprototypeof
+Reduction JSCallReducer::ReduceObjectPrototypeIsPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* value = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Ensure that the {receiver} is known to be a JSReceiver (so that
+ // the ToObject step of Object.prototype.isPrototypeOf is a no-op).
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ if (!receiver_maps[i]->IsJSReceiverMap()) return NoChange();
+ }
+
+ // We don't check whether {value} is a proper JSReceiver here explicitly,
+ // and don't explicitly rule out Primitive {value}s, since all of them
+ // have null as their prototype, so the prototype chain walk inside the
+ // JSHasInPrototypeChain operator immediately aborts and yields false.
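+ // Reorder the inputs so that {value} becomes the object whose prototype
+ // chain is walked and {receiver} the prototype searched for, matching the
+ // (object, prototype) input order of JSHasInPrototypeChain.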
+ NodeProperties::ReplaceValueInput(node, value, 0);
+ NodeProperties::ReplaceValueInput(node, receiver, 1);
+ for (int i = node->op()->ValueInputCount(); i-- > 2;) {
+ node->RemoveInput(i);
+ }
+ NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
+ return Changed(node);
+}
+
+// ES6 section 26.1.1 Reflect.apply ( target, thisArgument, argumentsList )
+Reduction JSCallReducer::ReduceReflectApply(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ DCHECK_LE(0, arity);
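+ // Reflect.apply(target, thisArgument, argumentsList) maps onto
+ // JSCallWithArrayLike(target, thisArgument, argumentsList); missing
+ // arguments are padded with undefined and extra ones are dropped.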
+ // Massage value inputs appropriately.
+ node->RemoveInput(0);
+ node->RemoveInput(0);
+ while (arity < 3) {
+ node->InsertInput(graph()->zone(), arity++, jsgraph()->UndefinedConstant());
+ }
+ while (arity-- > 3) {
+ node->RemoveInput(arity);
+ }
+ NodeProperties::ChangeOp(node,
+ javascript()->CallWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSCallWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
+// ES6 section 26.1.2 Reflect.construct ( target, argumentsList [, newTarget] )
+Reduction JSCallReducer::ReduceReflectConstruct(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int arity = static_cast<int>(p.arity() - 2);
+ DCHECK_LE(0, arity);
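+ // Reflect.construct(target, argumentsList[, newTarget]) maps onto
+ // JSConstructWithArrayLike with value inputs (target, argumentsList,
+ // newTarget); a missing newTarget defaults to target per the spec.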
+ // Massage value inputs appropriately.
+ node->RemoveInput(0);
+ node->RemoveInput(0);
+ while (arity < 2) {
+ node->InsertInput(graph()->zone(), arity++, jsgraph()->UndefinedConstant());
+ }
+ if (arity < 3) {
+ node->InsertInput(graph()->zone(), arity++, node->InputAt(0));
+ }
+ while (arity-- > 3) {
+ node->RemoveInput(arity);
+ }
+ NodeProperties::ChangeOp(node,
+ javascript()->ConstructWithArrayLike(p.frequency()));
+ Reduction const reduction = ReduceJSConstructWithArrayLike(node);
+ return reduction.Changed() ? reduction : Changed(node);
+}
+
// ES6 section 26.1.7 Reflect.getPrototypeOf ( target )
Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -350,6 +480,346 @@ Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
return ReduceObjectGetPrototype(node, target);
}
+bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype);
+}
+
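+// Inlines Array.prototype.forEach as an explicit loop: the receiver map,
+// length and elements pointer are re-checked on every iteration, and the
+// ArrayForEachLoop{Eager,Lazy}DeoptContinuation builtins serve as deopt
+// targets if the callback invalidates any of these assumptions.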
+Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) {
+ return NoChange();
+ }
+ if (receiver_maps.size() != 1) return NoChange();
+ Handle<Map> receiver_map(receiver_maps[0]);
+ ElementsKind kind = receiver_map->elements_kind();
+ // TODO(danno): Handle double packed elements
+ if (!IsFastElementsKind(kind) || IsDoubleElementsKind(kind) ||
+ !CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+
+ // TODO(danno): forEach can throw. Hook up exceptional edges.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->array_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
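+ // The continuation frame states below expect exactly these values as
+ // stack parameters.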
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ Node* orig_map = jsgraph()->HeapConstant(receiver_map);
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+
+ // Make sure that the access is still in bounds, since the callback could have
+ // changed the array's size.
+ Node* length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+ k = effect =
+ graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
+
+ // Reload the elements pointer before calling the callback, since the previous
+ // callback might have resized the array causing the elements buffer to be
+ // re-allocated.
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ Node* element = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ elements, k, effect, control);
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
+ checkpoint_params[3] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+ // Holey elements kinds require a hole check; if the element is the hole,
+ // the callback is skipped for this index.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ k = next_k;
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
+ return Replace(jsgraph()->UndefinedConstant());
+}
+
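+// Inlines Array.prototype.map along the same lines as forEach above, but
+// additionally allocates the result array up front via JSCreateArray and
+// stores each callback result with TransitionAndStoreElement.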
+Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result != NodeProperties::kReliableReceiverMaps) {
+ return NoChange();
+ }
+ if (receiver_maps.size() != 1) return NoChange();
+ Handle<Map> receiver_map(receiver_maps[0]);
+ ElementsKind kind = receiver_map->elements_kind();
+ // TODO(danno): Handle holey Smi and Object fast elements kinds and double
+ // packed.
+ if (!IsFastPackedElementsKind(kind) || IsDoubleElementsKind(kind)) {
+ return NoChange();
+ }
+
+ // TODO(danno): map can throw. Hook up exceptional edges.
+ if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+ // We want the input to be a generic Array.
+ const int map_index = Context::ArrayMapIndex(kind);
+ Handle<JSFunction> handle_constructor(
+ JSFunction::cast(
+ Map::cast(native_context()->get(map_index))->GetConstructor()),
+ isolate());
+ Node* array_constructor = jsgraph()->HeapConstant(handle_constructor);
+ if (receiver_map->prototype() !=
+ native_context()->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ return NoChange();
+ }
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+ Node* orig_map = jsgraph()->HeapConstant(receiver_map);
+
+ // Make sure the map hasn't changed before we construct the output array.
+ {
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+ }
+
+ Node* original_length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ // This array should be HOLEY_SMI_ELEMENTS because of the non-zero length.
+ Node* a = control = effect = graph()->NewNode(
+ javascript()->CreateArray(1, Handle<AllocationSite>::null()),
+ array_constructor, array_constructor, original_length, context,
+ outer_frame_state, effect, control);
+
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, a, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayMapLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ Node* array_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, effect, control);
+ Node* check_map =
+ graph()->NewNode(simplified()->ReferenceEqual(), array_map, orig_map);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check_map, effect, control);
+
+ // Make sure that the access is still in bounds, since the callback could have
+ // changed the array's size.
+ Node* length = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+ k = effect =
+ graph()->NewNode(simplified()->CheckBounds(), k, length, effect, control);
+
+ // Reload the elements pointer before calling the callback, since the previous
+ // callback might have resized the array causing the elements buffer to be
+ // re-allocated.
+ Node* elements = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ Node* element = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
+ elements, k, effect, control);
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ // This frame state is dealt with by hand in
+ // ArrayMapLoopLazyDeoptContinuation.
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayMapLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ Node* callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ Handle<Map> double_map(Map::cast(
+ native_context()->get(Context::ArrayMapIndex(HOLEY_DOUBLE_ELEMENTS))));
+ Handle<Map> fast_map(
+ Map::cast(native_context()->get(Context::ArrayMapIndex(HOLEY_ELEMENTS))));
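+ // TransitionAndStoreElement stores the callback result into the output
+ // array {a}, transitioning it to HOLEY_DOUBLE_ELEMENTS or HOLEY_ELEMENTS
+ // first if the value does not fit the array's current elements kind.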
+ effect = graph()->NewNode(
+ simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
+ callback_value, effect, control);
+
+ k = next_k;
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ ReplaceWithValue(node, a, effect, control);
+ return Replace(a);
+}
+
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, Handle<FunctionTemplateInfo> function_template_info) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -439,38 +909,106 @@ Reduction JSCallReducer::ReduceCallApiFunction(
return Changed(node);
}
-Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
- DCHECK(node->opcode() == IrOpcode::kJSCallWithSpread ||
+namespace {
+
+// Check that the arguments elements are not mutated; we play it extremely
+// safe here by requiring that every value use of {node} is a {LoadField}
+// or a {LoadElement}.
+bool IsSafeArgumentsElements(Node* node) {
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
+ if (edge.from()->opcode() != IrOpcode::kLoadField &&
+ edge.from()->opcode() != IrOpcode::kLoadElement) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
+ Node* node, int arity, CallFrequency const& frequency) {
+ DCHECK(node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
node->opcode() == IrOpcode::kJSConstructWithSpread);
- // Do check to make sure we can actually avoid iteration.
- if (!isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+ // In case of a call/construct with spread, we need to
+ // ensure that it's safe to avoid the actual iteration.
+ if ((node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) &&
+ !isolate()->initial_array_iterator_prototype_map()->is_stable()) {
return NoChange();
}
- Node* spread = NodeProperties::GetValueInput(node, arity);
-
- // Check if spread is an arguments object, and {node} is the only value user
- // of spread (except for value uses in frame states).
- if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
- for (Edge edge : spread->use_edges()) {
+ // Check if {arguments_list} is an arguments object, and {node} is the only
+ // value user of {arguments_list} (except for value uses in frame states).
+ Node* arguments_list = NodeProperties::GetValueInput(node, arity);
+ if (arguments_list->opcode() != IrOpcode::kJSCreateArguments) {
+ return NoChange();
+ }
+ for (Edge edge : arguments_list->use_edges()) {
+ if (!NodeProperties::IsValueEdge(edge)) continue;
Node* const user = edge.from();
- if (user == node) continue;
- // Ignore uses as frame state's locals or parameters.
- if (user->opcode() == IrOpcode::kStateValues) continue;
- // Ignore uses as frame state's accumulator.
- if (user->opcode() == IrOpcode::kFrameState && user->InputAt(2) == spread) {
- continue;
+ switch (user->opcode()) {
+ case IrOpcode::kCheckMaps:
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ case IrOpcode::kReferenceEqual:
+ case IrOpcode::kReturn:
+ // Ignore safe uses that definitely don't mess with the arguments.
+ continue;
+ case IrOpcode::kLoadField: {
+ DCHECK_EQ(arguments_list, user->InputAt(0));
+ FieldAccess const& access = FieldAccessOf(user->op());
+ if (access.offset == JSArray::kLengthOffset) {
+ // Ignore uses for arguments#length.
+ STATIC_ASSERT(JSArray::kLengthOffset ==
+ JSArgumentsObject::kLengthOffset);
+ continue;
+ } else if (access.offset == JSObject::kElementsOffset) {
+ // Ignore safe uses for arguments#elements.
+ if (IsSafeArgumentsElements(user)) continue;
+ }
+ break;
+ }
+ case IrOpcode::kJSCallWithArrayLike:
+ // Ignore uses as argumentsList input to calls with array like.
+ if (user->InputAt(2) == arguments_list) continue;
+ break;
+ case IrOpcode::kJSConstructWithArrayLike:
+ // Ignore uses as argumentsList input to construct with array like.
+ if (user->InputAt(1) == arguments_list) continue;
+ break;
+ case IrOpcode::kJSCallWithSpread: {
+ // Ignore uses as spread input to calls with spread.
+ SpreadWithArityParameter p = SpreadWithArityParameterOf(user->op());
+ int const arity = static_cast<int>(p.arity() - 1);
+ if (user->InputAt(arity) == arguments_list) continue;
+ break;
+ }
+ case IrOpcode::kJSConstructWithSpread: {
+ // Ignore uses as spread input to construct with spread.
+ SpreadWithArityParameter p = SpreadWithArityParameterOf(user->op());
+ int const arity = static_cast<int>(p.arity() - 2);
+ if (user->InputAt(arity) == arguments_list) continue;
+ break;
+ }
+ default:
+ break;
}
- if (!NodeProperties::IsValueEdge(edge)) continue;
+ // We cannot currently reduce the {node} to something better than what
+ // it already is, but we might be able to do something about the {node}
+ // later, so put it on the waitlist and try again during finalization.
+ waitlist_.insert(node);
return NoChange();
}
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
- // some other function (and same for the {spread}).
- CreateArgumentsType const type = CreateArgumentsTypeOf(spread->op());
- Node* frame_state = NodeProperties::GetFrameStateInput(spread);
+ // some other function (and same for the {arguments_list}).
+ CreateArgumentsType const type = CreateArgumentsTypeOf(arguments_list->op());
+ Node* frame_state = NodeProperties::GetFrameStateInput(arguments_list);
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
int start_index = 0;
// Determine the formal parameter count.
@@ -483,7 +1021,7 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
// TODO(turbofan): Further relax this constraint.
if (formal_parameter_count != 0) {
Node* effect = NodeProperties::GetEffectInput(node);
- while (effect != spread) {
+ while (effect != arguments_list) {
if (effect->op()->EffectInputCount() != 1 ||
!(effect->op()->properties() & Operator::kNoWrite)) {
return NoChange();
@@ -494,26 +1032,35 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
} else if (type == CreateArgumentsType::kRestParameter) {
start_index = formal_parameter_count;
- // Only check the array iterator protector when we have a rest object.
- if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+ // For spread calls/constructs with rest parameters we need to ensure that
+ // the array iterator protector is intact, which guards that the rest
+ // parameter iteration is not observable.
+ if (node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) {
+ if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ }
}
- // Install appropriate code dependencies.
- dependencies()->AssumeMapStable(
- isolate()->initial_array_iterator_prototype_map());
- if (type == CreateArgumentsType::kRestParameter) {
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ // For call/construct with spread, we need to also install a code
+ // dependency on the initial %ArrayIteratorPrototype% map here to
+ // ensure that no one messes with the next method.
+ if (node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread) {
+ dependencies()->AssumeMapStable(
+ isolate()->initial_array_iterator_prototype_map());
}
- // Remove the spread input from the {node}.
+
+ // Remove the {arguments_list} input from the {node}.
node->RemoveInput(arity--);
// Check if we are spreading to inlined arguments or to the arguments of
// the outermost function.
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
if (outer_state->opcode() != IrOpcode::kFrameState) {
Operator const* op =
- (node->opcode() == IrOpcode::kJSCallWithSpread)
- ? javascript()->CallForwardVarargs(arity + 1, start_index,
- TailCallMode::kDisallow)
+ (node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread)
+ ? javascript()->CallForwardVarargs(arity + 1, start_index)
: javascript()->ConstructForwardVarargs(arity + 2, start_index);
NodeProperties::ChangeOp(node, op);
return Changed(node);
@@ -533,16 +1080,16 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
parameters->InputAt(i));
}
- // TODO(turbofan): Collect call counts on spread call/construct and thread it
- // through here.
- if (node->opcode() == IrOpcode::kJSCallWithSpread) {
- NodeProperties::ChangeOp(node, javascript()->Call(arity + 1));
- Reduction const r = ReduceJSCall(node);
- return r.Changed() ? r : Changed(node);
+ if (node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread) {
+ NodeProperties::ChangeOp(node, javascript()->Call(arity + 1, frequency));
+ Reduction const reduction = ReduceJSCall(node);
+ return reduction.Changed() ? reduction : Changed(node);
} else {
- NodeProperties::ChangeOp(node, javascript()->Construct(arity + 2));
- Reduction const r = ReduceJSConstruct(node);
- return r.Changed() ? r : Changed(node);
+ NodeProperties::ChangeOp(node,
+ javascript()->Construct(arity + 2, frequency));
+ Reduction const reduction = ReduceJSConstruct(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
}
@@ -614,8 +1161,20 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceObjectGetPrototypeOf(node);
case Builtins::kObjectPrototypeGetProto:
return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kObjectPrototypeIsPrototypeOf:
+ return ReduceObjectPrototypeIsPrototypeOf(node);
+ case Builtins::kReflectApply:
+ return ReduceReflectApply(node);
+ case Builtins::kReflectConstruct:
+ return ReduceReflectConstruct(node);
case Builtins::kReflectGetPrototypeOf:
return ReduceReflectGetPrototypeOf(node);
+ case Builtins::kArrayForEach:
+ return ReduceArrayForEach(function, node);
+ case Builtins::kArrayMap:
+ return ReduceArrayMap(function, node);
+ case Builtins::kReturnReceiver:
+ return ReduceReturnReceiver(node);
default:
break;
}
@@ -658,9 +1217,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
arity++;
}
NodeProperties::ChangeOp(
- node,
- javascript()->Call(arity, p.frequency(), VectorSlotPair(),
- convert_mode, p.tail_call_mode()));
+ node, javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+ convert_mode));
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -675,27 +1233,12 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
if (nexus.IsUninitialized()) {
- // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
- if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
-
- // Insert a CallIC here to collect feedback for uninitialized calls.
- int const arg_count = static_cast<int>(p.arity() - 2);
- Callable callable = CodeFactory::CallIC(isolate(), p.convert_mode());
- CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
- flags);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Constant(arg_count);
- Node* slot_index =
- jsgraph()->Constant(FeedbackVector::GetIndex(p.feedback().slot()));
- Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, stub_arity);
- node->InsertInput(graph()->zone(), 3, slot_index);
- node->InsertInput(graph()->zone(), 4, feedback_vector);
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
+ if (flags() & kBailoutOnUninitialized) {
+ // Introduce a SOFT deopt if the call {node} wasn't executed so far.
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
+ }
+ return NoChange();
}
Handle<Object> feedback(nexus.GetFeedback(), isolate());
@@ -740,13 +1283,22 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
+ CallFrequency frequency = CallFrequencyOf(node->op());
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, 2, frequency);
+}
+
Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 1);
- return ReduceSpreadCall(node, arity);
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
+ CallFrequency frequency;
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency);
}
Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
@@ -805,8 +1357,18 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+ // Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
+ if (nexus.IsUninitialized()) {
+ if (flags() & kBailoutOnUninitialized) {
+ // Introduce a SOFT deopt if the construct {node} wasn't executed so far.
+ return ReduceSoftDeoptimize(
+ node, DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
+ }
+ return NoChange();
+ }
+
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// The feedback is an AllocationSite, which means we have called the
@@ -864,13 +1426,45 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
+ CallFrequency frequency = CallFrequencyOf(node->op());
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, 1, frequency);
+}
+
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
DCHECK_LE(3u, p.arity());
int arity = static_cast<int>(p.arity() - 2);
- return ReduceSpreadCall(node, arity);
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
+ CallFrequency frequency;
+ return ReduceCallOrConstructWithArrayLikeOrSpread(node, arity, frequency);
+}
+
+Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ ReplaceWithValue(node, receiver);
+ return Replace(receiver);
+}
+
+Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
+ DeoptimizeReason reason) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* frame_state = NodeProperties::FindFrameStateBefore(node);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ frame_state, effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
}
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 31326084cc..a6598e82d5 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/deoptimize-reason.h"
namespace v8 {
namespace internal {
@@ -18,6 +19,7 @@ class Factory;
namespace compiler {
// Forward declarations.
+class CallFrequency;
class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
@@ -27,16 +29,27 @@ class SimplifiedOperatorBuilder;
// which might allow inlining or other optimizations to be performed afterwards.
class JSCallReducer final : public AdvancedReducer {
public:
- JSCallReducer(Editor* editor, JSGraph* jsgraph,
+ // Flags that control the mode of operation.
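+ // When kBailoutOnUninitialized is set, calls and constructs without any
+ // collected type feedback are lowered to a soft deoptimization instead of
+ // being left in the graph.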
+ enum Flag { kNoFlags = 0u, kBailoutOnUninitialized = 1u << 0 };
+ typedef base::Flags<Flag> Flags;
+
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
Handle<Context> native_context,
CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
+ flags_(flags),
native_context_(native_context),
dependencies_(dependencies) {}
+ const char* reducer_name() const override { return "JSCallReducer"; }
+
Reduction Reduce(Node* node) final;
+ // Processes the waitlist gathered while the reducer was running,
+ // and makes a final attempt to reduce the nodes in the waitlist.
+ void Finalize() final;
+
private:
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
@@ -49,12 +62,23 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceObjectGetPrototype(Node* node, Node* object);
Reduction ReduceObjectGetPrototypeOf(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
+ Reduction ReduceObjectPrototypeIsPrototypeOf(Node* node);
+ Reduction ReduceReflectApply(Node* node);
+ Reduction ReduceReflectConstruct(Node* node);
Reduction ReduceReflectGetPrototypeOf(Node* node);
- Reduction ReduceSpreadCall(Node* node, int arity);
+ Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
+ Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
+ Node* node, int arity, CallFrequency const& frequency);
Reduction ReduceJSConstruct(Node* node);
+ Reduction ReduceJSConstructWithArrayLike(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
+ Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
+ Reduction ReduceReturnReceiver(Node* node);
+
+ Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -65,11 +89,14 @@ class JSCallReducer final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
+ Flags flags() const { return flags_; }
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
+ Flags const flags_;
Handle<Context> const native_context_;
CompilationDependencies* const dependencies_;
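+ // Nodes that could not be improved during the main reduction; Finalize()
+ // takes another pass over them.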
+ std::set<Node*> waitlist_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index c9548ffd1c..e682490386 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -199,11 +199,6 @@ Isolate* JSContextSpecialization::isolate() const {
return jsgraph()->isolate();
}
-
-JSOperatorBuilder* JSContextSpecialization::javascript() const {
- return jsgraph()->javascript();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index 0cf2bc1e54..83949fa3cc 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -40,6 +40,10 @@ class JSContextSpecialization final : public AdvancedReducer {
outer_(outer),
closure_(closure) {}
+ const char* reducer_name() const override {
+ return "JSContextSpecialization";
+ }
+
Reduction Reduce(Node* node) final;
private:
@@ -53,7 +57,6 @@ class JSContextSpecialization final : public AdvancedReducer {
size_t new_depth);
Isolate* isolate() const;
- JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 57eedfada2..dcf1575884 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -148,7 +148,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastSmiOrObjectElements()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -161,7 +161,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
}
}
- } else if (boilerplate->HasFastDoubleElements()) {
+ } else if (boilerplate->HasDoubleElements()) {
if (elements->Size() > kMaxRegularHeapObjectSize) return false;
} else {
return false;
@@ -169,8 +169,10 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
// TODO(turbofan): Do we want to support out-of-object properties?
- Handle<FixedArray> properties(boilerplate->properties(), isolate);
- if (properties->length() > 0) return false;
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array()->length() == 0)) {
+ return false;
+ }
// Check the in-object properties.
Handle<DescriptorArray> descriptors(
@@ -200,8 +202,7 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
// performance of using object literals is not worse than using constructor
// functions, see crbug.com/v8/6211 for details.
const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties =
- (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
} // namespace
@@ -259,14 +260,11 @@ Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
// Force completion of inobject slack tracking before
// generating code to finalize the instance size.
original_constructor->CompleteInobjectSlackTrackingIfActive();
-
- // Compute instance size from initial map of {original_constructor}.
Handle<Map> initial_map(original_constructor->initial_map(), isolate());
int const instance_size = initial_map->instance_size();
// Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} of the {original_constructor}
- // changes.
+ // deoptimized whenever the {initial_map} changes.
dependencies()->AssumeInitialMapCantChange(initial_map);
// Emit code to allocate the JSObject instance for the
@@ -338,7 +336,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kFastNewSloppyArguments);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -382,7 +381,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Callable callable = Builtins::CallableFor(
+ isolate(), Builtins::kFastNewStrictArguments);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -422,11 +422,13 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), rest_length);
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS),
+ rest_length);
RelaxControls(node);
a.FinishAndChange(node);
} else {
- Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewRestParameter);
Operator::Properties properties = node->op()->properties();
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
@@ -539,7 +541,7 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
a.Store(AccessBuilder::ForMap(), jsarray_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS),
jsgraph()->Constant(length));
RelaxControls(node);
a.FinishAndChange(node);
@@ -558,27 +560,30 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
Type* const closure_type = NodeProperties::GetType(closure);
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
- // Extract constructor and original constructor function.
if (closure_type->IsHeapConstant()) {
DCHECK(closure_type->AsHeapConstant()->Value()->IsJSFunction());
Handle<JSFunction> js_function =
Handle<JSFunction>::cast(closure_type->AsHeapConstant()->Value());
JSFunction::EnsureHasInitialMap(js_function);
- Handle<Map> initial_map(js_function->initial_map());
- initial_map->CompleteInobjectSlackTracking();
+
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ js_function->CompleteInobjectSlackTrackingIfActive();
+ Handle<Map> initial_map(js_function->initial_map(), isolate());
DCHECK(initial_map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
// Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} of the {original_constructor}
- // changes.
+ // deoptimized whenever the {initial_map} changes.
dependencies()->AssumeInitialMapCantChange(initial_map);
+ // Allocate a register file.
DCHECK(js_function->shared()->HasBytecodeArray());
int size = js_function->shared()->bytecode_array()->register_count();
- Node* elements = effect = AllocateElements(
- effect, control, FAST_HOLEY_ELEMENTS, size, NOT_TENURED);
+ Node* register_file = effect =
+ AllocateElements(effect, control, HOLEY_ELEMENTS, size, NOT_TENURED);
+ // Emit code to allocate the JS[Async]GeneratorObject instance.
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(initial_map->instance_size());
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
@@ -594,12 +599,10 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
jsgraph()->Constant(JSGeneratorObject::kNext));
a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
- a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), elements);
+ a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), register_file);
if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
- a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos(),
- undefined);
a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
undefined);
}
@@ -680,14 +683,14 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
// deoptimize in this case.
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsSmiElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
}
}
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ } else if (IsDoubleElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::Number())) {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -728,10 +731,13 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
Type* new_target_type = NodeProperties::GetType(new_target);
+ Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(site);
- ElementsKind elements_kind = site->GetElementsKind();
+ ElementsKind elements_kind =
+ site.is_null() ? GetInitialFastElementsKind() : site->GetElementsKind();
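+ // Without allocation site feedback, fall back to the initial fast
+ // elements kind.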
AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ (site.is_null() || AllocationSite::ShouldTrack(elements_kind))
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
@@ -746,112 +752,37 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState, properties);
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
} else if (arity == 1) {
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
-
- if (IsHoleyElementsKind(elements_kind)) {
- ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
- }
-
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* length = NodeProperties::GetValueInput(node, 2);
- Node* equal = graph()->NewNode(simplified()->ReferenceEqual(), length,
- jsgraph()->ZeroConstant());
-
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), equal, control);
- Node* call_holey;
- Node* call_packed;
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* if_equal = graph()->NewNode(common()->IfTrue(), branch);
- {
- ArraySingleArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
-
- Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
- node->InputAt(1),
- jsgraph()->HeapConstant(site),
- jsgraph()->Constant(1),
- jsgraph()->UndefinedConstant(),
- length,
- context,
- frame_state,
- effect,
- if_equal};
-
- call_holey =
- graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- }
- Node* if_not_equal = graph()->NewNode(common()->IfFalse(), branch);
- {
- // Require elements kind to "go holey."
- ArraySingleArgumentConstructorStub stub(
- isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState, properties);
-
- Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
- node->InputAt(1),
- jsgraph()->HeapConstant(site),
- jsgraph()->Constant(1),
- jsgraph()->UndefinedConstant(),
- length,
- context,
- frame_state,
- effect,
- if_not_equal};
-
- call_packed =
- graph()->NewNode(common()->Call(desc), arraysize(inputs), inputs);
- }
- Node* merge = graph()->NewNode(common()->Merge(2), call_holey, call_packed);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), call_holey,
- call_packed, merge);
- Node* phi =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- call_holey, call_packed, merge);
-
- ReplaceWithValue(node, phi, effect_phi, merge);
- return Changed(node);
+ // Require elements kind to "go holey".
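+ // A single argument is usually a length, which leaves the new elements as
+ // holes, so conservatively use the holey variant of {elements_kind}.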
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState, properties);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ } else {
+ DCHECK_GT(arity, 1);
+ ArrayNArgumentsConstructorStub stub(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, type_info);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
-
- DCHECK(arity > 1);
- ArrayNArgumentsConstructorStub stub(isolate());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), arity + 1,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
@@ -861,46 +792,43 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* new_target = NodeProperties::GetValueInput(node, 1);
- // TODO(mstarzinger): Array constructor can throw. Hook up exceptional edges.
- if (NodeProperties::IsExceptionalCall(node)) return NoChange();
-
// TODO(bmeurer): Optimize the subclassing case.
if (target != new_target) return NoChange();
// Check if we have a feedback {site} on the {node}.
Handle<AllocationSite> site = p.site();
- if (p.site().is_null()) return NoChange();
-
- // Attempt to inline calls to the Array constructor for the relevant cases
- // where either no arguments are provided, or exactly one unsigned number
- // argument is given.
- if (site->CanInlineCall()) {
- if (p.arity() == 0) {
- Node* length = jsgraph()->ZeroConstant();
- int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, site);
- } else if (p.arity() == 1) {
- Node* length = NodeProperties::GetValueInput(node, 2);
- Type* length_type = NodeProperties::GetType(length);
- if (!length_type->Maybe(Type::Number())) {
- // Handle the single argument case, where we know that the value
- // cannot be a valid Array length.
- return ReduceNewArray(node, {length}, site);
- }
- if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
- length_type->Max() <= kElementLoopUnrollLimit &&
- length_type->Min() == length_type->Max()) {
- int capacity = static_cast<int>(length_type->Max());
+ if (!site.is_null()) {
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (!length_type->Maybe(Type::Number())) {
+ // Handle the single argument case, where we know that the value
+ // cannot be a valid Array length.
+ return ReduceNewArray(node, {length}, site);
+ }
+ if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
+ length_type->Max() <= kElementLoopUnrollLimit &&
+ length_type->Min() == length_type->Max()) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
+ std::vector<Node*> values;
+ values.reserve(p.arity());
+ for (size_t i = 0; i < p.arity(); ++i) {
+ values.push_back(
+ NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
+ }
+ return ReduceNewArray(node, values, site);
}
- } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
- std::vector<Node*> values;
- values.reserve(p.arity());
- for (size_t i = 0; i < p.arity(); ++i) {
- values.push_back(
- NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
- }
- return ReduceNewArray(node, values, site);
}
}
@@ -944,9 +872,9 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
AllocationBuilder aa(jsgraph(), effect, graph()->start());
aa.AllocateArray(2, factory()->fixed_array_map());
- aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->Constant(0), key);
- aa.Store(AccessBuilder::ForFixedArrayElement(FAST_ELEMENTS),
+ aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->Constant(1), value);
Node* elements = aa.Finish();
@@ -955,7 +883,7 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
a.Store(AccessBuilder::ForMap(), array_map);
a.Store(AccessBuilder::ForJSObjectProperties(), properties);
a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ a.Store(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS), length);
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
a.FinishAndChange(node);
return Changed(node);
@@ -974,8 +902,7 @@ Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
Handle<Object> literal(feedback_vector->Get(slot), isolate());
if (literal->IsAllocationSite()) {
Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
- Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
- isolate());
+ Handle<JSObject> boilerplate(site->boilerplate(), isolate());
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
AllocationSiteUsageContext site_context(isolate(), site, false);
@@ -1247,10 +1174,10 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
DCHECK_LE(1, capacity);
DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
- Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ Handle<Map> elements_map = IsDoubleElementsKind(elements_kind)
? factory()->fixed_double_array_map()
: factory()->fixed_array_map();
- ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ElementAccess access = IsDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
Node* value = jsgraph()->TheHoleConstant();
@@ -1273,10 +1200,10 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
DCHECK_LE(1, capacity);
DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
- Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ Handle<Map> elements_map = IsDoubleElementsKind(elements_kind)
? factory()->fixed_double_array_map()
: factory()->fixed_array_map();
- ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ElementAccess access = IsDoubleElementsKind(elements_kind)
? AccessBuilder::ForFixedDoubleArrayElement()
: AccessBuilder::ForFixedArrayElement();
@@ -1498,10 +1425,6 @@ Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
-JSOperatorBuilder* JSCreateLowering::javascript() const {
- return jsgraph()->javascript();
-}
-
CommonOperatorBuilder* JSCreateLowering::common() const {
return jsgraph()->common();
}
@@ -1510,10 +1433,6 @@ SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
return jsgraph()->simplified();
}
-MachineOperatorBuilder* JSCreateLowering::machine() const {
- return jsgraph()->machine();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
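
Note on the js-create-lowering.cc hunks above: the generic JSCreateArray path no longer branches on a zero length to pick a packed versus holey single-argument stub; it always uses the holey single-argument stub (or the N-arguments stub for arity > 1) and passes the allocation site, or undefined when none is available, as type_info. The in-place rewrite is the same pattern used throughout these lowerings; a minimal sketch, assuming V8-internal compiler types and a hypothetical helper name:

// Sketch only: mirrors the rewrite pattern in the hunk above.
void LowerToArrayStubCall(Node* node, JSGraph* jsgraph, Zone* zone,
                          CommonOperatorBuilder* common, CallDescriptor* desc,
                          Handle<Code> stub_code, Node* type_info, int arity) {
  // Input 0 (the call target) becomes the stub code object.
  node->ReplaceInput(0, jsgraph->HeapConstant(stub_code));
  // Splice in the extra stub arguments the call descriptor expects:
  // allocation-site feedback, the argument count, and an undefined receiver.
  node->InsertInput(zone, 2, type_info);
  node->InsertInput(zone, 3, jsgraph->Constant(arity));
  node->InsertInput(zone, 4, jsgraph->UndefinedConstant());
  // Finally turn the JS operator into a plain call.
  NodeProperties::ChangeOp(node, common->Call(desc));
}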
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index d03464d39d..e122d4cd6b 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -44,6 +44,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
zone_(zone) {}
~JSCreateLowering() final {}
+ const char* reducer_name() const override { return "JSCreateLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -73,6 +75,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
ElementsKind elements_kind, int capacity,
PretenureFlag pretenure);
Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, Node* capacity_and_length);
+ Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind,
std::vector<Node*> const& values,
PretenureFlag pretenure);
@@ -94,10 +98,8 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Handle<Context> native_context() const { return native_context_; }
- JSOperatorBuilder* javascript() const;
CommonOperatorBuilder* common() const;
SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
CompilationDependencies* dependencies() const { return dependencies_; }
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index f268b3ac5b..cbc82c4eed 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -25,6 +25,8 @@ class JSFrameSpecialization final : public AdvancedReducer {
: AdvancedReducer(editor), frame_(frame), jsgraph_(jsgraph) {}
~JSFrameSpecialization() final {}
+ const char* reducer_name() const override { return "JSFrameSpecialization"; }
+
Reduction Reduce(Node* node) final;
private:
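
Several headers in this patch gain the same one-line reducer_name() override; it gives each reducer a human-readable name that the reduction machinery can use when tracing. A minimal sketch of such a use, assuming a Reducer base class with this virtual method and a stand-in TRACE macro:

// Sketch only: how a reduction loop might report which reducer changed a node.
Reduction TryReduce(Reducer* reducer, Node* node) {
  Reduction reduction = reducer->Reduce(node);
  if (reduction.Changed()) {
    TRACE("- in-place update of #%d by reducer %s\n", node->id(),
          reducer->reducer_name());
  }
  return reduction;
}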
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index ea5a4a4627..02630b2420 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -50,11 +50,11 @@ Reduction JSGenericLowering::Reduce(Node* node) {
return Changed(node);
}
-#define REPLACE_STUB_CALL(Name) \
- void JSGenericLowering::LowerJS##Name(Node* node) { \
- CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
- Callable callable = CodeFactory::Name(isolate()); \
- ReplaceWithStubCall(node, callable, flags); \
+#define REPLACE_STUB_CALL(Name) \
+ void JSGenericLowering::LowerJS##Name(Node* node) { \
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node); \
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::k##Name); \
+ ReplaceWithStubCall(node, callable, flags); \
}
REPLACE_STUB_CALL(Add)
REPLACE_STUB_CALL(Subtract)
@@ -79,6 +79,7 @@ REPLACE_STUB_CALL(ToNumber)
REPLACE_STUB_CALL(ToName)
REPLACE_STUB_CALL(ToObject)
REPLACE_STUB_CALL(ToString)
+REPLACE_STUB_CALL(ToPrimitiveToString)
#undef REPLACE_STUB_CALL
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
@@ -120,7 +121,7 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
void JSGenericLowering::LowerJSStrictEqual(Node* node) {
// The === operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::StrictEqual(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStrictEqual);
node->RemoveInput(4); // control
ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags,
Operator::kEliminatable);
@@ -129,7 +130,7 @@ void JSGenericLowering::LowerJSStrictEqual(Node* node) {
void JSGenericLowering::LowerJSToBoolean(Node* node) {
// The ToBoolean conversion doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::ToBoolean(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
@@ -138,7 +139,7 @@ void JSGenericLowering::LowerJSToBoolean(Node* node) {
void JSGenericLowering::LowerJSClassOf(Node* node) {
// The %_ClassOf intrinsic doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::ClassOf(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kClassOf);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
@@ -147,12 +148,25 @@ void JSGenericLowering::LowerJSClassOf(Node* node) {
void JSGenericLowering::LowerJSTypeOf(Node* node) {
// The typeof operator doesn't need the current context.
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
- Callable callable = CodeFactory::Typeof(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kTypeof);
node->AppendInput(zone(), graph()->start());
ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
Operator::kEliminatable);
}
+void JSGenericLowering::LowerJSStringConcat(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kStringConcat);
+ const CallInterfaceDescriptor& descriptor = callable.descriptor();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, operand_count, flags,
+ node->op()->properties());
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 1, jsgraph()->Int32Constant(operand_count));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -161,17 +175,18 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::KeyedLoadIC(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kKeyedLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
-
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
@@ -180,10 +195,11 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
- Callable callable = CodeFactory::LoadIC(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kLoadICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
- Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kLoadIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
@@ -317,19 +333,25 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::GetSuperConstructor(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kGetSuperConstructor);
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSHasInPrototypeChain(Node* node) {
+ ReplaceWithRuntimeCall(node, Runtime::kHasInPrototypeChain);
+}
+
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::InstanceOf(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kInstanceOf);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::OrdinaryHasInstance(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kOrdinaryHasInstance);
ReplaceWithStubCall(node, callable, flags);
}
@@ -345,7 +367,8 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
void JSGenericLowering::LowerJSCreate(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::FastNewObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewObject);
ReplaceWithStubCall(node, callable, flags);
}
@@ -370,13 +393,21 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
Handle<AllocationSite> const site = p.site();
- Node* new_target = node->InputAt(1);
+ ArrayConstructorDescriptor descriptor(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, arity + 1,
+ CallDescriptor::kNeedsFrameState, node->op()->properties(),
+ MachineType::AnyTagged());
+ Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
+ Node* stub_arity = jsgraph()->Int32Constant(arity);
Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
: jsgraph()->HeapConstant(site);
- node->RemoveInput(1);
- node->InsertInput(zone(), 1 + arity, new_target);
- node->InsertInput(zone(), 2 + arity, type_info);
- ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, type_info);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -385,11 +416,12 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+ node->RemoveInput(3); // control
- // Use the FastNewClosurebuiltin only for functions allocated in new
- // space.
+ // Use the FastNewClosure builtin only for functions allocated in new space.
if (p.pretenure() == NOT_TENURED) {
- Callable callable = CodeFactory::FastNewClosure(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastNewClosure);
node->InsertInput(zone(), 1,
jsgraph()->HeapConstant(p.feedback().vector()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -432,11 +464,11 @@ void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateKeyValueArray(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kCreateKeyValueArray);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
@@ -447,7 +479,7 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
// Use the FastCloneShallowArray builtin only for shallow boilerplates without
// properties up to the number of elements that the stubs can handle.
- if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
+ if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() < ConstructorBuiltins::kMaximumClonedShallowArrayElements) {
Callable callable = CodeFactory::FastCloneShallowArray(
isolate(), DONT_TRACK_ALLOCATION_SITE);
@@ -468,10 +500,11 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
// Use the FastCloneShallowObject builtin only for shallow boilerplates
// without elements up to the number of properties that the stubs can handle.
- if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
+ if ((p.flags() & AggregateLiteral::kIsShallow) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable = CodeFactory::FastCloneShallowObject(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneShallowObject);
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
@@ -482,7 +515,8 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::FastCloneRegExp(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kFastCloneRegExp);
Node* literal_index = jsgraph()->SmiConstant(p.index());
Node* literal_flags = jsgraph()->SmiConstant(p.flags());
Node* pattern = jsgraph()->HeapConstant(p.constant());
@@ -494,19 +528,11 @@ void JSGenericLowering::LowerJSCreateLiteralRegExp(Node* node) {
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- const CreateCatchContextParameters& parameters =
- CreateCatchContextParametersOf(node->op());
- node->InsertInput(zone(), 0,
- jsgraph()->HeapConstant(parameters.catch_name()));
- node->InsertInput(zone(), 2,
- jsgraph()->HeapConstant(parameters.scope_info()));
- ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateWithContext(Node* node) {
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
- ReplaceWithRuntimeCall(node, Runtime::kPushWithContext);
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
@@ -563,22 +589,46 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = jsgraph()->UndefinedConstant();
+ Node* arguments_list = node->InputAt(1);
+ Node* new_target = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(2, new_target);
+ node->ReplaceInput(3, arguments_list);
+ node->InsertInput(zone(), 4, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
+ int const spread_index = arg_count;
+ int const new_target_index = arg_count + 1;
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructWithSpread(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
- Node* new_target = node->InputAt(arg_count + 1);
+ Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
+ Node* new_target = node->InputAt(new_target_index);
+ Node* spread = node->InputAt(spread_index);
Node* receiver = jsgraph()->UndefinedConstant();
- node->RemoveInput(arg_count + 1); // Drop new target.
+ DCHECK(new_target_index > spread_index);
+ node->RemoveInput(new_target_index); // Drop new target.
+ node->RemoveInput(spread_index);
+
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, new_target);
- node->InsertInput(zone(), 3, stub_arity);
- node->InsertInput(zone(), 4, receiver);
+ node->InsertInput(zone(), 3, stack_arg_count);
+ node->InsertInput(zone(), 4, spread);
+ node->InsertInput(zone(), 5, receiver);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -587,9 +637,6 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::CallForwardVarargs(isolate());
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -607,9 +654,6 @@ void JSGenericLowering::LowerJSCall(Node* node) {
ConvertReceiverMode const mode = p.convert_mode();
Callable callable = CodeFactory::Call(isolate(), mode);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -619,17 +663,35 @@ void JSGenericLowering::LowerJSCall(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCallWithArrayLike(Node* node) {
+ Callable callable = CodeFactory::CallWithArrayLike(isolate());
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* receiver = node->InputAt(1);
+ Node* arguments_list = node->InputAt(2);
+ node->InsertInput(zone(), 0, stub_code);
+ node->ReplaceInput(3, receiver);
+ node->ReplaceInput(2, arguments_list);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
SpreadWithArityParameter const& p = SpreadWithArityParameterOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
- Callable callable = CodeFactory::CallWithSpread(isolate());
+ int const spread_index = static_cast<int>(p.arity() + 1);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::CallWithSpread(isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ // We pass the spread in a register, not on the stack.
+ Node* stack_arg_count = jsgraph()->Int32Constant(arg_count - 1);
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 2, stack_arg_count);
+ node->InsertInput(zone(), 3, node->InputAt(spread_index));
+ node->RemoveInput(spread_index + 1);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -644,13 +706,13 @@ void JSGenericLowering::LowerJSConvertReceiver(Node* node) {
void JSGenericLowering::LowerJSForInNext(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ForInNext(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInNext);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- Callable callable = CodeFactory::ForInPrepare(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kForInPrepare);
ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
}
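
In the spread lowerings of js-generic-lowering.cc above, the spread operand now travels in a register of its own rather than on the stack, so the stack argument count passed to the stub drops by one and the spread node moves from the tail of the input list into a fixed slot. A minimal sketch of the construct-with-spread shuffle, assuming the input layout (target, arg0..argN-2, spread, new_target) implied by the hunk:

// Sketch only: mirrors the input shuffle in LowerJSConstructWithSpread above.
void ShuffleConstructSpreadInputs(Node* node, JSGraph* jsgraph, Zone* zone,
                                  Node* stub_code, int arg_count) {
  int const spread_index = arg_count;          // last explicit argument
  int const new_target_index = arg_count + 1;  // follows the spread
  Node* new_target = node->InputAt(new_target_index);
  Node* spread = node->InputAt(spread_index);
  // Remove from the back first so the earlier index stays valid.
  node->RemoveInput(new_target_index);
  node->RemoveInput(spread_index);
  node->InsertInput(zone, 0, stub_code);
  node->InsertInput(zone, 2, new_target);
  // One argument fewer is pushed, since the spread has its own register.
  node->InsertInput(zone, 3, jsgraph->Int32Constant(arg_count - 1));
  node->InsertInput(zone, 4, spread);
  node->InsertInput(zone, 5, jsgraph->UndefinedConstant());  // receiver
}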
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 88d0b45156..1a8102da59 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -27,6 +27,8 @@ class JSGenericLowering final : public Reducer {
explicit JSGenericLowering(JSGraph* jsgraph);
~JSGenericLowering() final;
+ const char* reducer_name() const override { return "JSGenericLowering"; }
+
Reduction Reduce(Node* node) final;
protected:
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 93706acf5a..dfe05933bb 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -26,6 +26,11 @@ Node* JSGraph::AllocateInOldSpaceStubConstant() {
HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
+Node* JSGraph::ArrayConstructorStubConstant() {
+ return CACHED(kArrayConstructorStubConstant,
+ HeapConstant(ArrayConstructorStub(isolate()).GetCode()));
+}
+
Node* JSGraph::ToNumberBuiltinConstant() {
return CACHED(kToNumberBuiltinConstant,
HeapConstant(isolate()->builtins()->ToNumber()));
@@ -77,6 +82,11 @@ Node* JSGraph::FixedArrayMapConstant() {
HeapConstant(factory()->fixed_array_map()));
}
+Node* JSGraph::PropertyArrayMapConstant() {
+ return CACHED(kPropertyArrayMapConstant,
+ HeapConstant(factory()->property_array_map()));
+}
+
Node* JSGraph::FixedDoubleArrayMapConstant() {
return CACHED(kFixedDoubleArrayMapConstant,
HeapConstant(factory()->fixed_double_array_map()));
@@ -130,6 +140,9 @@ Node* JSGraph::OneConstant() {
return CACHED(kOneConstant, NumberConstant(1.0));
}
+Node* JSGraph::MinusOneConstant() {
+ return CACHED(kMinusOneConstant, NumberConstant(-1.0));
+}
Node* JSGraph::NaNConstant() {
return CACHED(kNaNConstant,
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 4b3ed4856a..a4eb9a9061 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Canonicalized global constants.
Node* AllocateInNewSpaceStubConstant();
Node* AllocateInOldSpaceStubConstant();
+ Node* ArrayConstructorStubConstant();
Node* ToNumberBuiltinConstant();
Node* CEntryStubConstant(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs,
@@ -51,6 +52,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
Node* EmptyFixedArrayConstant();
Node* EmptyStringConstant();
Node* FixedArrayMapConstant();
+ Node* PropertyArrayMapConstant();
Node* FixedDoubleArrayMapConstant();
Node* HeapNumberMapConstant();
Node* OptimizedOutConstant();
@@ -63,6 +65,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
Node* ZeroConstant();
Node* OneConstant();
Node* NaNConstant();
+ Node* MinusOneConstant();
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
@@ -164,6 +167,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
enum CachedNode {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
+ kArrayConstructorStubConstant,
kToNumberBuiltinConstant,
kCEntryStub1Constant,
kCEntryStub2Constant,
@@ -173,6 +177,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStringConstant,
kFixedArrayMapConstant,
kFixedDoubleArrayMapConstant,
+ kPropertyArrayMapConstant,
kHeapNumberMapConstant,
kOptimizedOutConstant,
kStaleRegisterConstant,
@@ -183,6 +188,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kNullConstant,
kZeroConstant,
kOneConstant,
+ kMinusOneConstant,
kNaNConstant,
kEmptyStateValues,
kSingleDeadTypedStateValues,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 0f5f9f87c1..e1026d6c3b 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -24,6 +24,8 @@ class JSInliningHeuristic final : public AdvancedReducer {
seen_(local_zone),
jsgraph_(jsgraph) {}
+ const char* reducer_name() const override { return "JSInliningHeuristic"; }
+
Reduction Reduce(Node* node) final;
// Processes the list of candidates gathered while the reducer was running,
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 9b260e3533..4172998544 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -250,36 +250,6 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
outer_frame_state);
}
-Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- Handle<SharedFunctionInfo> shared;
- frame_info.shared_info().ToHandle(&shared);
-
- Node* function = frame_state->InputAt(kFrameStateFunctionInput);
-
- // If we are inlining a tail call drop caller's frame state and an
- // arguments adaptor if it exists.
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
- if (frame_state->opcode() == IrOpcode::kFrameState) {
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- if (frame_info.type() == FrameStateType::kArgumentsAdaptor) {
- frame_state = NodeProperties::GetFrameStateInput(frame_state);
- }
- }
-
- const FrameStateFunctionInfo* state_info =
- common()->CreateFrameStateFunctionInfo(
- FrameStateType::kTailCallerFunction, 0, 0, shared);
-
- const Operator* op = common()->FrameState(
- BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
- Node* node0 = graph()->NewNode(op0);
- return graph()->NewNode(op, node0, node0, node0,
- jsgraph()->UndefinedConstant(), function,
- frame_state);
-}
-
namespace {
// TODO(bmeurer): Unify this with the witness helper functions in the
@@ -498,7 +468,7 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
// Function contains break points.
- if (shared_info->HasDebugInfo()) {
+ if (shared_info->HasBreakInfo()) {
TRACE("Not inlining %s into %s because callee may contain break points\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -552,12 +522,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- // Remember that we inlined this function. This needs to be called right
- // after we ensure deoptimization support so that the code flusher
- // does not remove the code with the deoptimization support.
- int inlining_id = info_->AddInlinedFunction(
- shared_info, source_positions_->GetSourcePosition(node));
-
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
@@ -571,6 +535,10 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
Handle<FeedbackVector> feedback_vector;
DetermineCallContext(node, context, feedback_vector);
+ // Remember that we inlined this function.
+ int inlining_id = info_->AddInlinedFunction(
+ shared_info, source_positions_->GetSourcePosition(node));
+
// Create the subgraph for the inlinee.
Node* start;
Node* end;
@@ -754,20 +722,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
}
}
- // If we are inlining a JS call at tail position then we have to pop current
- // frame state and its potential arguments adaptor frame state in order to
- // make the call stack be consistent with non-inlining case.
- // After that we add a tail caller frame state which lets deoptimizer handle
- // the case when the outermost function inlines a tail call (it should remove
- // potential arguments adaptor frame that belongs to outermost function when
- // deopt happens).
- if (node->opcode() == IrOpcode::kJSCall) {
- const CallParameters& p = CallParametersOf(node->op());
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- frame_state = CreateTailCallerFrameState(node, frame_state);
- }
- }
-
// Insert argument adaptor frame if required. The callees formal parameter
// count (i.e. value outputs of start node minus target, receiver, new target,
// arguments count and context) have to match the number of arguments passed
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index e40e6a745e..cff72b0760 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -31,6 +31,8 @@ class JSInliner final : public AdvancedReducer {
jsgraph_(jsgraph),
source_positions_(source_positions) {}
+ const char* reducer_name() const override { return "JSInliner"; }
+
// Reducer interface, eagerly inlines everything.
Reduction Reduce(Node* node) final;
@@ -60,8 +62,6 @@ class JSInliner final : public AdvancedReducer {
FrameStateType frame_state_type,
Handle<SharedFunctionInfo> shared);
- Node* CreateTailCallerFrameState(Node* node, Node* outer_frame_state);
-
Reduction InlineCall(Node* call, Node* new_target, Node* context,
Node* frame_state, Node* start, Node* end,
Node* exception_target,
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index b9ee8a4ed6..b4b0ffaa51 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -42,8 +42,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
- case Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos:
- return ReduceAsyncGeneratorGetAwaitInputOrDebugPos(node);
case Runtime::kInlineAsyncGeneratorReject:
return ReduceAsyncGeneratorReject(node);
case Runtime::kInlineAsyncGeneratorResolve:
@@ -62,10 +60,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_MAP_TYPE);
case Runtime::kInlineIsJSSet:
return ReduceIsInstanceType(node, JS_SET_TYPE);
- case Runtime::kInlineIsJSMapIterator:
- return ReduceIsInstanceType(node, JS_MAP_ITERATOR_TYPE);
- case Runtime::kInlineIsJSSetIterator:
- return ReduceIsInstanceType(node, JS_SET_ITERATOR_TYPE);
case Runtime::kInlineIsJSWeakMap:
return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
case Runtime::kInlineIsJSWeakSet:
@@ -198,23 +192,16 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
-Reduction JSIntrinsicLowering::ReduceAsyncGeneratorGetAwaitInputOrDebugPos(
- Node* node) {
- Node* const generator = NodeProperties::GetValueInput(node, 0);
- Node* const effect = NodeProperties::GetEffectInput(node);
- Node* const control = NodeProperties::GetControlInput(node);
- Operator const* const op = simplified()->LoadField(
- AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos());
-
- return Change(node, op, generator, effect, control);
-}
-
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorReject(Node* node) {
- return Change(node, CodeFactory::AsyncGeneratorReject(isolate()), 0);
+ return Change(
+ node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorReject),
+ 0);
}
Reduction JSIntrinsicLowering::ReduceAsyncGeneratorResolve(Node* node) {
- return Change(node, CodeFactory::AsyncGeneratorResolve(isolate()), 0);
+ return Change(
+ node, Builtins::CallableFor(isolate(), Builtins::kAsyncGeneratorResolve),
+ 0);
}
Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 0f3e84a5e5..fe5d4f370f 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -37,6 +37,8 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
DeoptimizationMode mode);
~JSIntrinsicLowering() final {}
+ const char* reducer_name() const override { return "JSIntrinsicLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -47,7 +49,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceGeneratorClose(Node* node);
Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
- Reduction ReduceAsyncGeneratorGetAwaitInputOrDebugPos(Node* node);
Reduction ReduceAsyncGeneratorReject(Node* node);
Reduction ReduceAsyncGeneratorResolve(Node* node);
Reduction ReduceGeneratorSaveInputForAwait(Node* node);
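
A pattern that recurs across js-generic-lowering.cc and js-intrinsic-lowering.cc above: stub lookups move from per-builtin CodeFactory accessors to the generic Builtins::CallableFor lookup keyed by the builtin enum. Both forms produce a Callable for the same builtin; the new one avoids a dedicated CodeFactory method per builtin:

// Before (old form, as removed above):
//   Callable callable = CodeFactory::ToBoolean(isolate());
// After (new form, as added above):
//   Callable callable = Builtins::CallableFor(isolate(), Builtins::kToBoolean);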
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 5a3ccebed1..a323ba68f6 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -13,6 +13,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
+#include "src/compiler/property-access-builder.h"
#include "src/compiler/type-cache.h"
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
@@ -38,15 +39,7 @@ bool HasOnlyJSArrayMaps(MapHandles const& maps) {
return true;
}
-bool HasOnlyNumberMaps(MapHandles const& maps) {
- for (auto map : maps) {
- if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
- }
- return true;
-}
-
-template <typename T>
-bool HasOnlyStringMaps(T const& maps) {
+bool HasOnlyStringMaps(MapHandles const& maps) {
for (auto map : maps) {
if (!map->IsStringMap()) return false;
}
@@ -79,10 +72,14 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSAdd:
return ReduceJSAdd(node);
+ case IrOpcode::kJSStringConcat:
+ return ReduceJSStringConcat(node);
case IrOpcode::kJSGetSuperConstructor:
return ReduceJSGetSuperConstructor(node);
case IrOpcode::kJSInstanceOf:
return ReduceJSInstanceOf(node);
+ case IrOpcode::kJSHasInPrototypeChain:
+ return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSLoadContext:
@@ -133,6 +130,59 @@ Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSStringConcat(Node* node) {
+ // TODO(turbofan): This has to run together with the inlining and
+ // native context specialization to be able to leverage the string
+ // constant-folding for optimizing property access, but we should
+ // nevertheless find a better home for this at some point.
+ DCHECK_EQ(IrOpcode::kJSStringConcat, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ DCHECK_GE(StringConcatParameterOf(node->op()).operand_count(), 3);
+
+ // Constant-fold string concatenation.
+ HeapObjectMatcher last_operand(NodeProperties::GetValueInput(node, 0));
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ for (int i = 1; i < operand_count; ++i) {
+ HeapObjectMatcher current_operand(NodeProperties::GetValueInput(node, i));
+
+ if (last_operand.HasValue() && current_operand.HasValue()) {
+ Handle<String> left = Handle<String>::cast(last_operand.Value());
+ Handle<String> right = Handle<String>::cast(current_operand.Value());
+ if (left->length() + right->length() <= String::kMaxLength) {
+ Handle<String> result =
+ factory()->NewConsString(left, right).ToHandleChecked();
+ Node* value = jsgraph()->HeapConstant(result);
+ node->ReplaceInput(i - 1, value);
+ node->RemoveInput(i);
+ last_operand = HeapObjectMatcher(value);
+ i--;
+ operand_count--;
+ continue;
+ }
+ }
+ last_operand = current_operand;
+ }
+
+ if (operand_count == StringConcatParameterOf(node->op()).operand_count()) {
+ return NoChange();
+ } else if (operand_count == 1) {
+ // Replace with input if there is only one input left.
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ } else if (operand_count == 2) {
+ // Replace with JSAdd if we only have two operands left.
+ NodeProperties::ChangeOp(node,
+ javascript()->Add(BinaryOperationHint::kString));
+ return Changed(node);
+ } else {
+ // Otherwise update operand count.
+ NodeProperties::ChangeOp(node, javascript()->StringConcat(operand_count));
+ return Changed(node);
+ }
+}
+
Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
Node* node) {
DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
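
The new ReduceJSStringConcat above folds adjacent constant string operands at compile time, then rewrites the node according to how many operands survive: one remaining operand becomes a plain value replacement, two become a JSAdd with a string hint, and more keep a JSStringConcat with an updated operand count. A simplified model of the folding loop, treating every operand as an already-known constant string (the real reducer only merges when both neighbours are HeapConstant strings and the combined length stays within String::kMaxLength):

// Sketch only: the constant-folding loop, modelled on plain strings.
#include <string>
#include <vector>

void FoldAdjacentConstants(std::vector<std::string>* operands,
                           size_t max_length) {
  for (size_t i = 1; i < operands->size();) {
    std::string& left = (*operands)[i - 1];
    std::string& right = (*operands)[i];
    if (left.size() + right.size() <= max_length) {
      // Merge the pair and re-examine the freshly merged operand, just as
      // the reducer continues with the new HeapConstant as last_operand.
      left += right;
      operands->erase(operands->begin() + i);
    } else {
      ++i;
    }
  }
}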
@@ -166,6 +216,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Node* constructor = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Check if the right hand side is a known {receiver}.
@@ -184,6 +235,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
if (access_info.IsNotFound()) {
// If there's no @@hasInstance handler, the OrdinaryHasInstance operation
// takes over, but that requires the {receiver} to be callable.
@@ -191,12 +244,13 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(
+ native_context(), access_info.receiver_maps(), holder);
}
// Monomorphic property access.
- effect = BuildCheckMaps(constructor, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(constructor, &effect, control,
+ access_info.receiver_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
@@ -211,7 +265,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(
+ native_context(), access_info.receiver_maps(), holder);
} else {
holder = receiver;
}
@@ -232,14 +287,25 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
DCHECK(constant->IsCallable());
// Monomorphic property access.
- effect = BuildCheckMaps(constructor, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(constructor, &effect, control,
+ access_info.receiver_maps());
+
+ // Create a nested frame state inside the current method's most-recent frame
+ // state that will ensure that deopts that happen after this point will not
+ // fall back to the last Checkpoint--which would completely re-execute the
+ // instanceof logic--but rather create an activation of a version of the
+ // ToBoolean stub that finishes the remaining work of instanceof and returns
+ // to the caller without duplicating side-effects upon a lazy deopt.
+ Node* continuation_frame_state = CreateStubBuiltinContinuationFrameState(
+ jsgraph(), Builtins::kToBooleanLazyDeoptContinuation, context, nullptr,
+ 0, frame_state, ContinuationFrameStateMode::LAZY);
// Call the @@hasInstance handler.
Node* target = jsgraph()->Constant(constant);
node->InsertInput(graph()->zone(), 0, target);
node->ReplaceInput(1, constructor);
node->ReplaceInput(2, object);
+ node->ReplaceInput(4, continuation_frame_state);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
node, javascript()->Call(3, CallFrequency(), VectorSlotPair(),
@@ -260,15 +326,85 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+JSNativeContextSpecialization::InferHasInPrototypeChainResult
+JSNativeContextSpecialization::InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<HeapObject> prototype) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+
+ // Check if either all or none of the {receiver_maps} have the given
+ // {prototype} in their prototype chain.
+ bool all = true;
+ bool none = true;
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ // In case of an unreliable {result} we need to ensure that all
+ // {receiver_maps} are stable, because otherwise we cannot trust
+ // the {receiver_maps} information, since arbitrary side-effects
+ // may have happened.
+ if (!receiver_map->is_stable()) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ for (PrototypeIterator j(receiver_map);; j.Advance()) {
+ if (j.IsAtEnd()) {
+ all = false;
+ break;
+ }
+ Handle<HeapObject> const current =
+ PrototypeIterator::GetCurrent<HeapObject>(j);
+ if (current.is_identical_to(prototype)) {
+ none = false;
+ break;
+ }
+ if (!current->map()->is_stable() ||
+ current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ }
+ DCHECK_IMPLIES(all, !none);
+ DCHECK_IMPLIES(none, !all);
+
+ if (all) return kIsInPrototypeChain;
+ if (none) return kIsNotInPrototypeChain;
+ return kMayBeInPrototypeChain;
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSHasInPrototypeChain(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* prototype = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Check if we can constant-fold the prototype chain walk
+ // for the given {value} and the {prototype}.
+ HeapObjectMatcher m(prototype);
+ if (m.HasValue()) {
+ InferHasInPrototypeChainResult result =
+ InferHasInPrototypeChain(value, effect, m.Value());
+ if (result != kMayBeInPrototypeChain) {
+ Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ }
+
+ return NoChange();
+}
+
Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
Node* node) {
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
Node* object = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
// Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
@@ -302,144 +438,15 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Install a code dependency on the {function}s initial map.
Handle<Map> initial_map(function->initial_map(), isolate());
dependencies()->AssumeInitialMapCantChange(initial_map);
- Handle<JSReceiver> function_prototype =
- handle(JSReceiver::cast(initial_map->prototype()), isolate());
-
- // Check if we can constant-fold the prototype chain walk
- // for the given {object} and the {function_prototype}.
- InferHasInPrototypeChainResult result =
- InferHasInPrototypeChain(object, effect, function_prototype);
- if (result != kMayBeInPrototypeChain) {
- Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
-
- Node* prototype = jsgraph()->Constant(function_prototype);
-
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
- Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- Node* loop = control =
- graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* vloop = object =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- object, object, loop);
-
- // Load the {object} map and instance type.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- object, effect, control);
- Node* object_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- object_map, effect, control);
-
- // Check if the {object} is a special receiver, because for special
- // receivers, i.e. proxies or API objects that need access checks,
- // we have to use the %HasInPrototypeChain runtime function instead.
- Node* check1 = graph()->NewNode(
- simplified()->NumberLessThanOrEqual(), object_instance_type,
- jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, control);
-
- control = graph()->NewNode(common()->IfFalse(), branch1);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
-
- // Check if the {object} is not a receiver at all.
- Node* check10 =
- graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
- jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
- Node* branch10 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check10, if_true1);
-
- // A primitive value cannot match the {prototype} we're looking for.
- if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
- vtrue1 = jsgraph()->FalseConstant();
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
- Node* efalse1 = etrue1;
- Node* vfalse1;
- {
- // Slow path, need to call the %HasInPrototypeChain runtime function.
- vfalse1 = efalse1 = if_false1 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
- prototype, context, frame_state, efalse1, if_false1);
-
- // Replace any potential {IfException} uses of {node} to catch
- // exceptions from this %HasInPrototypeChain runtime call instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse1);
- NodeProperties::ReplaceEffectInput(on_exception, efalse1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- Revisit(on_exception);
- }
- }
-
- // Load the {object} prototype.
- Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
- effect, control);
-
- // Check if we reached the end of {object}s prototype chain.
- Node* check2 =
- graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
- jsgraph()->NullConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = effect;
- Node* vtrue2 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch2);
-
- // Check if we reached the {prototype}.
- Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, prototype);
- Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = effect;
- Node* vtrue3 = jsgraph()->TrueConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch3);
-
- // Close the loop.
- vloop->ReplaceInput(1, object_prototype);
- eloop->ReplaceInput(1, effect);
- loop->ReplaceInput(1, control);
-
- control = graph()->NewNode(common()->Merge(5), if_true0, if_true1,
- if_true2, if_true3, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
- etrue3, efalse1, control);
-
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, vtrue0);
- node->ReplaceInput(1, vtrue1);
- node->ReplaceInput(2, vtrue2);
- node->ReplaceInput(3, vtrue3);
- node->ReplaceInput(4, vfalse1);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(
- node, common()->Phi(MachineRepresentation::kTagged, 5));
- return Changed(node);
+ Node* prototype =
+ jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
+
+ // Lower the {node} to JSHasInPrototypeChain.
+ NodeProperties::ReplaceValueInput(node, object, 0);
+ NodeProperties::ReplaceValueInput(node, prototype, 1);
+ NodeProperties::ChangeOp(node, javascript()->HasInPrototypeChain());
+ Reduction const reduction = ReduceJSHasInPrototypeChain(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
}
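
The InferHasInPrototypeChain helper added above answers a three-way question: JSHasInPrototypeChain is folded only when every inferred receiver map agrees that the prototype is (or is not) on its chain, and the walk gives up whenever a special receiver or an unstable map is encountered. A simplified model of the all/none vote, with each receiver map reduced to its prototype chain and the stability and special-receiver checks omitted:

// Sketch only: the tri-state vote over all inferred receiver maps.
#include <vector>

enum InferResult { kIsInChain, kIsNotInChain, kMayBeInChain };

InferResult InferHasInPrototypeChain(
    const std::vector<std::vector<const void*>>& chains,
    const void* prototype) {
  if (chains.empty()) return kMayBeInChain;  // no map information
  bool all = true;
  bool none = true;
  for (const auto& chain : chains) {
    bool found = false;
    for (const void* p : chain) {
      if (p == prototype) { found = true; break; }
    }
    if (found) none = false; else all = false;
  }
  if (all) return kIsInChain;      // fold to the true constant
  if (none) return kIsNotInChain;  // fold to the false constant
  return kMayBeInChain;            // mixed answers: leave the node alone
}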
@@ -745,17 +752,6 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
return NoChange();
}
- // TODO(turbofan): Add support for inlining into try blocks.
- bool is_exceptional = NodeProperties::IsExceptionalCall(node);
- for (const auto& access_info : access_infos) {
- if (access_info.IsAccessorConstant()) {
- // Accessor in try-blocks are not supported yet.
- if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
- return NoChange();
- }
- }
- }
-
// Nothing to do if we have no non-deprecated maps.
if (access_infos.empty()) {
return ReduceSoftDeoptimize(
@@ -769,29 +765,35 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
}
+ // Collect call nodes to rewire exception edges.
+ ZoneVector<Node*> if_exception_nodes(zone());
+ ZoneVector<Node*>* if_exceptions = nullptr;
+ Node* if_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
+ if_exceptions = &if_exception_nodes;
+ }
+
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+
// Check for the monomorphic cases.
if (access_infos.size() == 1) {
PropertyAccessInfo access_info = access_infos.front();
- if (HasOnlyStringMaps(access_info.receiver_maps())) {
-      // Monomorphic string access (ignoring the fact that there are multiple
- // String maps).
- receiver = effect = graph()->NewNode(simplified()->CheckString(),
- receiver, effect, control);
- } else if (HasOnlyNumberMaps(access_info.receiver_maps())) {
- // Monomorphic number access (we also deal with Smis here).
- receiver = effect = graph()->NewNode(simplified()->CheckNumber(),
- receiver, effect, control);
- } else {
- // Monomorphic property access.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
- effect = BuildCheckMaps(receiver, effect, control,
- access_info.receiver_maps());
+ // Try to build string check or number check if possible.
+ // Otherwise build a map check.
+ if (!access_builder.TryBuildStringCheck(access_info.receiver_maps(),
+ &receiver, &effect, control) &&
+ !access_builder.TryBuildNumberCheck(access_info.receiver_maps(),
+ &receiver, &effect, control)) {
+ receiver =
+ access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
}
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state, effect, control, name,
- access_info, access_mode, language_mode);
+ if_exceptions, access_info, access_mode, language_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -821,7 +823,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- receiver = BuildCheckHeapObject(receiver, &effect, control);
+ receiver =
+ access_builder.BuildCheckHeapObject(receiver, &effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -848,8 +851,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- this_effect = BuildCheckMaps(receiver, this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
+ receiver_maps);
this_effects.push_back(this_effect);
this_controls.push_back(fallthrough_control);
fallthrough_control = nullptr;
@@ -894,9 +897,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
// Generate the actual property access.
- ValueEffectControl continuation = BuildPropertyAccess(
- this_receiver, this_value, context, frame_state, this_effect,
- this_control, name, access_info, access_mode, language_mode);
+ ValueEffectControl continuation =
+ BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+ this_effect, this_control, name, if_exceptions,
+ access_info, access_mode, language_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -924,6 +928,24 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
control_count + 1, &effects.front());
}
}
+
+ // Properly rewire IfException edges if {node} is inside a try-block.
+ if (!if_exception_nodes.empty()) {
+ DCHECK_NOT_NULL(if_exception);
+ DCHECK_EQ(if_exceptions, &if_exception_nodes);
+ int const if_exception_count = static_cast<int>(if_exceptions->size());
+ Node* merge = graph()->NewNode(common()->Merge(if_exception_count),
+ if_exception_count, &if_exceptions->front());
+ if_exceptions->push_back(merge);
+ Node* ephi =
+ graph()->NewNode(common()->EffectPhi(if_exception_count),
+ if_exception_count + 1, &if_exceptions->front());
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, if_exception_count),
+ if_exception_count + 1, &if_exceptions->front());
+ ReplaceWithValue(if_exception, phi, ephi, merge);
+ }
+
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1109,7 +1131,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// store is either holey, or we have a potentially growing store,
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
- if (IsHoleyElementsKind(receiver_map->elements_kind()) ||
+ if (IsHoleyOrDictionaryElementsKind(receiver_map->elements_kind()) ||
IsGrowStoreMode(store_mode)) {
// Make sure all prototypes are stable and have fast elements.
for (Handle<Map> map = receiver_map;;) {
@@ -1133,7 +1155,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -1162,8 +1185,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
control);
// Perform map check on the {receiver}.
- effect = BuildCheckMaps(receiver, effect, control,
- access_info.receiver_maps());
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
// Access the actual element.
ValueEffectControl continuation =
@@ -1214,8 +1237,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
- this_effect = BuildCheckMaps(receiver, this_effect, this_control,
- receiver_maps);
+ access_builder.BuildCheckMaps(receiver, &this_effect, this_control,
+ receiver_maps);
fallthrough_control = nullptr;
} else {
ZoneVector<Node*> this_controls(zone());
@@ -1450,157 +1473,246 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
p.language_mode(), store_mode);
}
+Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
+ Node* receiver, Node* context, Node* frame_state, Node** effect,
+ Node** control, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info) {
+ Node* target = jsgraph()->Constant(access_info.constant());
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info =
+ frame_info.shared_info().ToHandleChecked();
+ // We need a FrameState for the getter stub to restore the correct
+ // context before returning to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub, 1, 0,
+ shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+ receiver),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
+ target, frame_state);
+
+ // Introduce the call to the getter function.
+ Node* value;
+ if (access_info.constant()->IsJSFunction()) {
+ value = *effect = *control = graph()->NewNode(
+ jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, context, frame_state0, *effect, *control);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ Node* holder =
+ access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ value =
+ InlineApiCall(receiver, holder, context, target, frame_state0, nullptr,
+ effect, control, shared_info, function_template_info);
+ }
+ // Remember to rewire the IfException edge if this is inside a try-block.
+ if (if_exceptions != nullptr) {
+ // Create the appropriate IfException/IfSuccess projections.
+ Node* const if_exception =
+ graph()->NewNode(common()->IfException(), *control, *effect);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), *control);
+ if_exceptions->push_back(if_exception);
+ *control = if_success;
+ }
+ return value;
+}
+
+Node* JSNativeContextSpecialization::InlinePropertySetterCall(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info) {
+ Node* target = jsgraph()->Constant(access_info.constant());
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> shared_info =
+ frame_info.shared_info().ToHandleChecked();
+ // We need a FrameState for the setter stub to restore the correct
+ // context and return the appropriate value to fullcodegen.
+ FrameStateFunctionInfo const* frame_info0 =
+ common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub, 2, 0,
+ shared_info);
+ Node* frame_state0 = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ frame_info0),
+ graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+ receiver, value),
+ jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(), context,
+ target, frame_state);
+
+ // Introduce the call to the setter function.
+ if (access_info.constant()->IsJSFunction()) {
+ *effect = *control = graph()->NewNode(
+ jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined),
+ target, receiver, value, context, frame_state0, *effect, *control);
+ } else {
+ DCHECK(access_info.constant()->IsFunctionTemplateInfo());
+ Handle<FunctionTemplateInfo> function_template_info(
+ Handle<FunctionTemplateInfo>::cast(access_info.constant()));
+ DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
+ Node* holder =
+ access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ value =
+ InlineApiCall(receiver, holder, context, target, frame_state0, value,
+ effect, control, shared_info, function_template_info);
+ }
+ // Remember to rewire the IfException edge if this is inside a try-block.
+ if (if_exceptions != nullptr) {
+ // Create the appropriate IfException/IfSuccess projections.
+ Node* const if_exception =
+ graph()->NewNode(common()->IfException(), *control, *effect);
+ Node* const if_success = graph()->NewNode(common()->IfSuccess(), *control);
+ if_exceptions->push_back(if_exception);
+ *control = if_success;
+ }
+ return value;
+}
+
+Node* JSNativeContextSpecialization::InlineApiCall(
+ Node* receiver, Node* holder, Node* context, Node* target,
+ Node* frame_state, Node* value, Node** effect, Node** control,
+ Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ Handle<CallHandlerInfo> call_handler_info = handle(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+ Handle<Object> call_data_object(call_handler_info->data(), isolate());
+
+ // Only setters have a value.
+ int const argc = value == nullptr ? 0 : 1;
+ // The stub always expects the receiver as the first param on the stack.
+ CallApiCallbackStub stub(
+ isolate(), argc,
+ true /* FunctionTemplateInfo doesn't have an associated context. */);
+ CallInterfaceDescriptor call_interface_descriptor =
+ stub.GetCallInterfaceDescriptor();
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), call_interface_descriptor,
+ call_interface_descriptor.GetStackParameterCount() + argc +
+ 1 /* implicit receiver */,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+
+ Node* data = jsgraph()->Constant(call_data_object);
+ ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
+ Node* function_reference =
+ graph()->NewNode(common()->ExternalConstant(ExternalReference(
+ &function, ExternalReference::DIRECT_API_CALL, isolate())));
+ Node* code = jsgraph()->HeapConstant(stub.GetCode());
+
+ // Add CallApiCallbackStub's register argument as well.
+ Node* inputs[11] = {code, target, data, holder, function_reference, receiver};
+ int index = 6 + argc;
+ inputs[index++] = context;
+ inputs[index++] = frame_state;
+ inputs[index++] = *effect;
+ inputs[index++] = *control;
+ // This needs to stay here because of the edge case described in
+ // http://crbug.com/675648.
+ if (value != nullptr) {
+ inputs[6] = value;
+ }
+
+ return *effect = *control =
+ graph()->NewNode(common()->Call(call_descriptor), index, inputs);
+}
+
JSNativeContextSpecialization::ValueEffectControl
-JSNativeContextSpecialization::BuildPropertyAccess(
- Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
- Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
- AccessMode access_mode, LanguageMode language_mode) {
+JSNativeContextSpecialization::BuildPropertyLoad(
+ Node* receiver, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, LanguageMode language_mode) {
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
if (access_info.holder().ToHandle(&holder)) {
- DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
- AssumePrototypesStable(access_info.receiver_maps(), holder);
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
}
// Generate the actual property access.
+ Node* value;
if (access_info.IsNotFound()) {
- DCHECK_EQ(AccessMode::kLoad, access_mode);
value = jsgraph()->UndefinedConstant();
} else if (access_info.IsDataConstant()) {
DCHECK(!FLAG_track_constant_fields);
+ value = jsgraph()->Constant(access_info.constant());
+ } else if (access_info.IsAccessorConstant()) {
+ value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
+ &control, if_exceptions, access_info);
+ } else {
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ value = access_builder.BuildLoadDataField(name, access_info, receiver,
+ &effect, &control);
+ }
+
+ return ValueEffectControl(value, effect, control);
+}
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, AccessMode access_mode,
+ LanguageMode language_mode) {
+ switch (access_mode) {
+ case AccessMode::kLoad:
+ return BuildPropertyLoad(receiver, context, frame_state, effect, control,
+ name, if_exceptions, access_info, language_mode);
+ case AccessMode::kStore:
+ case AccessMode::kStoreInLiteral:
+ return BuildPropertyStore(receiver, value, context, frame_state, effect,
+ control, name, if_exceptions, access_info,
+ access_mode, language_mode);
+ }
+ UNREACHABLE();
+ return ValueEffectControl();
+}
+
+JSNativeContextSpecialization::ValueEffectControl
+JSNativeContextSpecialization::BuildPropertyStore(
+ Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name, ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info, AccessMode access_mode,
+ LanguageMode language_mode) {
+ // Determine actual holder and perform prototype chain checks.
+ Handle<JSObject> holder;
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ if (access_info.holder().ToHandle(&holder)) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+ access_builder.AssumePrototypesStable(native_context(),
+ access_info.receiver_maps(), holder);
+ }
+
+ DCHECK(!access_info.IsNotFound());
+
+ // Generate the actual property access.
+ if (access_info.IsDataConstant()) {
+ DCHECK(!FLAG_track_constant_fields);
Node* constant_value = jsgraph()->Constant(access_info.constant());
- if (access_mode == AccessMode::kStore) {
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
- constant_value);
- effect =
- graph()->NewNode(simplified()->CheckIf(), check, effect, control);
- }
+ Node* check =
+ graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
- // TODO(bmeurer): Properly rewire the IfException edge here if there's any.
- Node* target = jsgraph()->Constant(access_info.constant());
- FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
- Handle<SharedFunctionInfo> shared_info =
- frame_info.shared_info().ToHandleChecked();
- switch (access_mode) {
- case AccessMode::kLoad: {
- // We need a FrameState for the getter stub to restore the correct
- // context before returning to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kGetterStub,
- 1, 0, shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
- receiver),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
- context, target, frame_state);
-
- // Introduce the call to the getter function.
- if (access_info.constant()->IsJSFunction()) {
- value = effect = control = graph()->NewNode(
- javascript()->Call(2, CallFrequency(), VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, context, frame_state0, effect, control);
- } else {
- DCHECK(access_info.constant()->IsFunctionTemplateInfo());
- Handle<FunctionTemplateInfo> function_template_info(
- Handle<FunctionTemplateInfo>::cast(access_info.constant()));
- DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, nullptr, effect, control,
- shared_info, function_template_info);
- value = value_effect_control.value();
- effect = value_effect_control.effect();
- control = value_effect_control.control();
- }
- break;
- }
- case AccessMode::kStoreInLiteral:
- case AccessMode::kStore: {
- // We need a FrameState for the setter stub to restore the correct
- // context and return the appropriate value to fullcodegen.
- FrameStateFunctionInfo const* frame_info0 =
- common()->CreateFrameStateFunctionInfo(FrameStateType::kSetterStub,
- 2, 0, shared_info);
- Node* frame_state0 = graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_info0),
- graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
- receiver, value),
- jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
- context, target, frame_state);
-
- // Introduce the call to the setter function.
- if (access_info.constant()->IsJSFunction()) {
- effect = control = graph()->NewNode(
- javascript()->Call(3, CallFrequency(), VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined),
- target, receiver, value, context, frame_state0, effect, control);
- } else {
- DCHECK(access_info.constant()->IsFunctionTemplateInfo());
- Handle<FunctionTemplateInfo> function_template_info(
- Handle<FunctionTemplateInfo>::cast(access_info.constant()));
- DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, value, effect, control,
- shared_info, function_template_info);
- value = value_effect_control.value();
- effect = value_effect_control.effect();
- control = value_effect_control.control();
- }
- break;
- }
- }
+ value =
+ InlinePropertySetterCall(receiver, value, context, frame_state, &effect,
+ &control, if_exceptions, access_info);
} else {
DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
FieldIndex const field_index = access_info.field_index();
Type* const field_type = access_info.field_type();
MachineRepresentation const field_representation =
access_info.field_representation();
- if (access_mode == AccessMode::kLoad) {
- if (access_info.holder().ToHandle(&holder)) {
- receiver = jsgraph()->Constant(holder);
- }
- // Optimize immutable property loads.
- HeapObjectMatcher m(receiver);
- if (m.HasValue() && m.Value()->IsJSObject()) {
- // TODO(ishell): Use something simpler like
- //
- // Handle<Object> value =
- // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
- // Representation::Tagged(), field_index);
- //
- // here, once we have the immutable bit in the access_info.
-
- // TODO(turbofan): Given that we already have the field_index here, we
- // might be smarter in the future and not rely on the LookupIterator,
- // but for now let's just do what Crankshaft does.
- LookupIterator it(m.Value(), name,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.state() == LookupIterator::DATA) {
- bool is_reaonly_non_configurable =
- it.IsReadOnly() && !it.IsConfigurable();
- if (is_reaonly_non_configurable ||
- (FLAG_track_constant_fields &&
- access_info.IsDataConstantField())) {
- Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
- if (!is_reaonly_non_configurable) {
- // It's necessary to add dependency on the map that introduced
- // the field.
- DCHECK(access_info.IsDataConstantField());
- DCHECK(!it.is_dictionary_holder());
- Handle<Map> field_owner_map = it.GetFieldOwnerMap();
- dependencies()->AssumeFieldOwner(field_owner_map);
- }
- return ValueEffectControl(value, effect, control);
- }
- }
- }
- }
Node* storage = receiver;
if (!field_index.is_inobject()) {
storage = effect = graph()->NewNode(
@@ -1615,196 +1727,158 @@ JSNativeContextSpecialization::BuildPropertyAccess(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier};
- if (access_mode == AccessMode::kLoad) {
- if (field_representation == MachineRepresentation::kFloat64) {
+ bool store_to_constant_field = FLAG_track_constant_fields &&
+ (access_mode == AccessMode::kStore) &&
+ access_info.IsDataConstantField();
+
+ DCHECK(access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral);
+ switch (field_representation) {
+ case MachineRepresentation::kFloat64: {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
- FieldAccess const storage_access = {kTaggedBase,
- field_index.offset(),
- name,
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access), storage,
- effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- }
- } else if (field_representation ==
- MachineRepresentation::kTaggedPointer) {
- // Remember the map of the field value, if its map is stable. This is
- // used by the LoadElimination to eliminate map checks on the result.
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- if (field_map->is_stable()) {
- dependencies()->AssumeMapStable(field_map);
- field_access.map = field_map;
+ if (access_info.HasTransitionMap()) {
+ // Allocate a MutableHeapNumber for the new property.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable),
+ effect);
+ Node* box = effect = graph()->NewNode(
+ simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
+ jsgraph()->Constant(HeapNumber::kSize), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), box,
+ jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
+ effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
+ box, value, effect, control);
+ value = effect =
+ graph()->NewNode(common()->FinishRegion(), box, effect);
+
+ field_access.type = Type::Any();
+ field_access.machine_type = MachineType::TaggedPointer();
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+ } else {
+ // We just store directly to the MutableHeapNumber.
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = effect =
+ graph()->NewNode(simplified()->LoadField(storage_access),
+ storage, effect, control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ field_access.machine_type = MachineType::Float64();
}
}
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+          // If the field is constant, check that the value we are going
+          // to store matches the current value.
+ Node* current_value = effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->NumberEqual(),
+ current_value, value);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ return ValueEffectControl(value, effect, control);
+ }
+ break;
}
- value = effect = graph()->NewNode(simplified()->LoadField(field_access),
- storage, effect, control);
- } else {
- bool store_to_constant_field = FLAG_track_constant_fields &&
- (access_mode == AccessMode::kStore) &&
- access_info.IsDataConstantField();
-
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
- switch (field_representation) {
- case MachineRepresentation::kFloat64: {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
- if (!field_index.is_inobject() || field_index.is_hidden_field() ||
- !FLAG_unbox_double_fields) {
- if (access_info.HasTransitionMap()) {
- // Allocate a MutableHeapNumber for the new property.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable),
- effect);
- Node* box = effect = graph()->NewNode(
- simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), box,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- box, value, effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), box, effect);
-
- field_access.type = Type::Any();
- field_access.machine_type = MachineType::TaggedPointer();
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- // We just store directly to the MutableHeapNumber.
- FieldAccess const storage_access = {kTaggedBase,
- field_index.offset(),
- name,
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
- storage = effect =
- graph()->NewNode(simplified()->LoadField(storage_access),
- storage, effect, control);
- field_access.offset = HeapNumber::kValueOffset;
- field_access.name = MaybeHandle<Name>();
- field_access.machine_type = MachineType::Float64();
- }
- }
- if (store_to_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect =
- graph()->NewNode(simplified()->LoadField(field_access), storage,
- effect, control);
-
- Node* check = graph()->NewNode(simplified()->NumberEqual(),
- current_value, value);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
- control);
- return ValueEffectControl(value, effect, control);
- }
- break;
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTagged:
+ if (store_to_constant_field) {
+ DCHECK(!access_info.HasTransitionMap());
+          // If the field is constant, check that the value we are going
+          // to store matches the current value.
+ Node* current_value = effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, effect, control);
+
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+ current_value, value);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ return ValueEffectControl(value, effect, control);
}
- case MachineRepresentation::kTaggedSigned:
- case MachineRepresentation::kTaggedPointer:
- case MachineRepresentation::kTagged:
- if (store_to_constant_field) {
- DCHECK(!access_info.HasTransitionMap());
- // If the field is constant check that the value we are going
- // to store matches current value.
- Node* current_value = effect =
- graph()->NewNode(simplified()->LoadField(field_access), storage,
- effect, control);
-
- Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
- current_value, value);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
- control);
- return ValueEffectControl(value, effect, control);
- }
-
- if (field_representation == MachineRepresentation::kTaggedSigned) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
- field_access.write_barrier_kind = kNoWriteBarrier;
-
- } else if (field_representation ==
- MachineRepresentation::kTaggedPointer) {
- // Ensure that {value} is a HeapObject.
- value = BuildCheckHeapObject(value, &effect, control);
- Handle<Map> field_map;
- if (access_info.field_map().ToHandle(&field_map)) {
- // Emit a map check for the value.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone,
- ZoneHandleSet<Map>(field_map)),
- value, effect, control);
- }
- field_access.write_barrier_kind = kPointerWriteBarrier;
- } else {
- DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
+ if (field_representation == MachineRepresentation::kTaggedSigned) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+ effect, control);
+ field_access.write_barrier_kind = kNoWriteBarrier;
+
+ } else if (field_representation ==
+ MachineRepresentation::kTaggedPointer) {
+ // Ensure that {value} is a HeapObject.
+ value = access_builder.BuildCheckHeapObject(value, &effect, control);
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ // Emit a map check for the value.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map)),
+ value, effect, control);
}
- break;
- case MachineRepresentation::kNone:
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8:
- case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
- case MachineRepresentation::kWord64:
- case MachineRepresentation::kFloat32:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
- UNREACHABLE();
- break;
- }
- // Check if we need to perform a transitioning store.
- Handle<Map> transition_map;
- if (access_info.transition_map().ToHandle(&transition_map)) {
- // Check if we need to grow the properties backing store
- // with this transitioning store.
- Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
- isolate());
- if (original_map->unused_property_fields() == 0) {
- DCHECK(!field_index.is_inobject());
-
- // Reallocate the properties {storage}.
- storage = effect = BuildExtendPropertiesBackingStore(
- original_map, storage, effect, control);
-
- // Perform the actual store.
- effect = graph()->NewNode(simplified()->StoreField(field_access),
- storage, value, effect, control);
-
- // Atomically switch to the new properties below.
- field_access = AccessBuilder::ForJSObjectProperties();
- value = storage;
- storage = receiver;
+ field_access.write_barrier_kind = kPointerWriteBarrier;
+
+ } else {
+ DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
}
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kObservable), effect);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(field_access),
- storage, value, effect, control);
- effect = graph()->NewNode(common()->FinishRegion(),
- jsgraph()->UndefinedConstant(), effect);
- } else {
- // Regular non-transitioning field store.
+ break;
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kWord64:
+ case MachineRepresentation::kFloat32:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ break;
+ }
+ // Check if we need to perform a transitioning store.
+ Handle<Map> transition_map;
+ if (access_info.transition_map().ToHandle(&transition_map)) {
+ // Check if we need to grow the properties backing store
+ // with this transitioning store.
+ Handle<Map> original_map(Map::cast(transition_map->GetBackPointer()),
+ isolate());
+ if (original_map->unused_property_fields() == 0) {
+ DCHECK(!field_index.is_inobject());
+
+ // Reallocate the properties {storage}.
+ storage = effect = BuildExtendPropertiesBackingStore(
+ original_map, storage, effect, control);
+
+ // Perform the actual store.
effect = graph()->NewNode(simplified()->StoreField(field_access),
storage, value, effect, control);
+
+ // Atomically switch to the new properties below.
+ field_access = AccessBuilder::ForJSObjectProperties();
+ value = storage;
+ storage = receiver;
}
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), receiver,
+ jsgraph()->Constant(transition_map), effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+ value, effect, control);
+ effect = graph()->NewNode(common()->FinishRegion(),
+ jsgraph()->UndefinedConstant(), effect);
+ } else {
+ // Regular non-transitioning field store.
+ effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
+ value, effect, control);
}
}
@@ -1838,6 +1912,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
}
Handle<Map> receiver_map(map, isolate());
+ if (!Map::TryUpdate(receiver_map).ToHandle(&receiver_map)) return NoChange();
+
Handle<Name> cached_name =
handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
@@ -1855,10 +1931,10 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* control = NodeProperties::GetControlInput(node);
// Monomorphic property access.
- receiver = BuildCheckHeapObject(receiver, &effect, control);
-
- effect =
- BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+ PropertyAccessBuilder access_builder(jsgraph(), dependencies());
+ receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
+ access_builder.BuildCheckMaps(receiver, &effect, control,
+ access_info.receiver_maps());
// Ensure that {name} matches the cached name.
Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1873,7 +1949,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
// Generate the actual property access.
ValueEffectControl continuation = BuildPropertyAccess(
receiver, value, context, frame_state_lazy, effect, control, cached_name,
- access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
+ nullptr, access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1895,7 +1971,6 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
break;
}
UNREACHABLE();
- return kExternalInt8Array;
}
} // namespace
@@ -2060,7 +2135,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// Don't try to store to a copy-on-write backing store.
if (access_mode == AccessMode::kStore &&
- IsFastSmiOrObjectElementsKind(elements_kind) &&
+ IsSmiOrObjectElementsKind(elements_kind) &&
store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
effect = graph()->NewNode(
simplified()->CheckMaps(
@@ -2101,10 +2176,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// Compute the element access.
Type* element_type = Type::NonInternal();
MachineType element_machine_type = MachineType::AnyTagged();
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
element_type = Type::Number();
element_machine_type = MachineType::Float64();
- } else if (IsFastSmiElementsKind(elements_kind)) {
+ } else if (IsSmiElementsKind(elements_kind)) {
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
@@ -2116,12 +2191,12 @@ JSNativeContextSpecialization::BuildElementAccess(
if (access_mode == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
- if (IsHoleyElementsKind(elements_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
element_access.type =
Type::Union(element_type, Type::Hole(), graph()->zone());
}
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
element_access.machine_type = MachineType::AnyTagged();
}
// Perform the actual backing store access.
@@ -2130,8 +2205,8 @@ JSNativeContextSpecialization::BuildElementAccess(
index, effect, control);
// Handle loading from holey backing stores correctly, by either mapping
// the hole to undefined if possible, or deoptimizing otherwise.
- if (elements_kind == FAST_HOLEY_ELEMENTS ||
- elements_kind == FAST_HOLEY_SMI_ELEMENTS) {
+ if (elements_kind == HOLEY_ELEMENTS ||
+ elements_kind == HOLEY_SMI_ELEMENTS) {
// Check if we are allowed to turn the hole into undefined.
if (CanTreatHoleAsUndefined(receiver_maps)) {
// Turn the hole into undefined.
@@ -2139,10 +2214,10 @@ JSNativeContextSpecialization::BuildElementAccess(
value);
} else {
// Bailout if we see the hole.
- value = effect = graph()->NewNode(simplified()->CheckTaggedHole(),
+ value = effect = graph()->NewNode(simplified()->CheckNotTaggedHole(),
value, effect, control);
}
- } else if (elements_kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ } else if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
// Perform the hole check on the result.
CheckFloat64HoleMode mode = CheckFloat64HoleMode::kNeverReturnHole;
// Check if we are allowed to return the hole directly.
@@ -2155,10 +2230,10 @@ JSNativeContextSpecialization::BuildElementAccess(
}
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
- if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsSmiElementsKind(elements_kind)) {
value = effect =
graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
+ } else if (IsDoubleElementsKind(elements_kind)) {
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
effect, control);
// Make sure we do not store signalling NaNs into double arrays.
@@ -2166,7 +2241,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Ensure that copy-on-write backing store is writable.
- if (IsFastSmiOrObjectElementsKind(elements_kind) &&
+ if (IsSmiOrObjectElementsKind(elements_kind) &&
store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
@@ -2180,10 +2255,10 @@ JSNativeContextSpecialization::BuildElementAccess(
if (receiver_is_jsarray) {
flags |= GrowFastElementsFlag::kArrayObject;
}
- if (IsHoleyElementsKind(elements_kind)) {
+ if (IsHoleyOrDictionaryElementsKind(elements_kind)) {
flags |= GrowFastElementsFlag::kHoleyElements;
}
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsDoubleElementsKind(elements_kind)) {
flags |= GrowFastElementsFlag::kDoubleElements;
}
elements = effect = graph()->NewNode(
@@ -2200,112 +2275,6 @@ JSNativeContextSpecialization::BuildElementAccess(
return ValueEffectControl(value, effect, control);
}
-JSNativeContextSpecialization::ValueEffectControl
-JSNativeContextSpecialization::InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
- Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
- Handle<FunctionTemplateInfo> function_template_info) {
- Handle<CallHandlerInfo> call_handler_info = handle(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate());
- Handle<Object> call_data_object(call_handler_info->data(), isolate());
-
- // Only setters have a value.
- int const argc = value == nullptr ? 0 : 1;
- // The stub always expects the receiver as the first param on the stack.
- CallApiCallbackStub stub(
- isolate(), argc,
- true /* FunctionTemplateInfo doesn't have an associated context. */);
- CallInterfaceDescriptor call_interface_descriptor =
- stub.GetCallInterfaceDescriptor();
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), call_interface_descriptor,
- call_interface_descriptor.GetStackParameterCount() + argc +
- 1 /* implicit receiver */,
- CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
- MachineType::AnyTagged(), 1);
-
- Node* data = jsgraph()->Constant(call_data_object);
- ApiFunction function(v8::ToCData<Address>(call_handler_info->callback()));
- Node* function_reference =
- graph()->NewNode(common()->ExternalConstant(ExternalReference(
- &function, ExternalReference::DIRECT_API_CALL, isolate())));
- Node* code = jsgraph()->HeapConstant(stub.GetCode());
-
- // Add CallApiCallbackStub's register argument as well.
- Node* inputs[11] = {
- code, target, data, receiver /* holder */, function_reference, receiver};
- int index = 6 + argc;
- inputs[index++] = context;
- inputs[index++] = frame_state;
- inputs[index++] = effect;
- inputs[index++] = control;
- // This needs to stay here because of the edge case described in
- // http://crbug.com/675648.
- if (value != nullptr) {
- inputs[6] = value;
- }
-
- Node* control0;
- Node* effect0;
- Node* value0 = effect0 = control0 =
- graph()->NewNode(common()->Call(call_descriptor), index, inputs);
- return ValueEffectControl(value0, effect0, control0);
-}
-
-Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
- Node** effect,
- Node* control) {
- switch (receiver->opcode()) {
- case IrOpcode::kHeapConstant:
- case IrOpcode::kJSCreate:
- case IrOpcode::kJSCreateArguments:
- case IrOpcode::kJSCreateArray:
- case IrOpcode::kJSCreateClosure:
- case IrOpcode::kJSCreateIterResultObject:
- case IrOpcode::kJSCreateLiteralArray:
- case IrOpcode::kJSCreateLiteralObject:
- case IrOpcode::kJSCreateLiteralRegExp:
- case IrOpcode::kJSConvertReceiver:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSToString:
- case IrOpcode::kJSToObject:
- case IrOpcode::kJSTypeOf: {
- return receiver;
- }
- default: {
- return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, *effect, control);
- }
- }
-}
-
-Node* JSNativeContextSpecialization::BuildCheckMaps(
- Node* receiver, Node* effect, Node* control,
- MapHandles const& receiver_maps) {
- HeapObjectMatcher m(receiver);
- if (m.HasValue()) {
- Handle<Map> receiver_map(m.Value()->map(), isolate());
- if (receiver_map->is_stable()) {
- for (Handle<Map> map : receiver_maps) {
- if (map.is_identical_to(receiver_map)) {
- dependencies()->AssumeMapStable(receiver_map);
- return effect;
- }
- }
- }
- }
- ZoneHandleSet<Map> maps;
- CheckMapsFlags flags = CheckMapsFlag::kNone;
- for (Handle<Map> map : receiver_maps) {
- maps.insert(map, graph()->zone());
- if (map->is_migration_target()) {
- flags |= CheckMapsFlag::kTryMigrateInstance;
- }
- }
- return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
- effect, control);
-}
-
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
Handle<Map> map, Node* properties, Node* effect, Node* control) {
// TODO(bmeurer/jkummerow): Property deletions can undo map transitions
@@ -2339,10 +2308,10 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
common()->BeginRegion(RegionObservability::kNotObservable), effect);
Node* new_properties = effect = graph()->NewNode(
simplified()->Allocate(Type::OtherInternal(), NOT_TENURED),
- jsgraph()->Constant(FixedArray::SizeFor(new_length)), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- new_properties, jsgraph()->FixedArrayMapConstant(),
- effect, control);
+ jsgraph()->Constant(PropertyArray::SizeFor(new_length)), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForMap()), new_properties,
+ jsgraph()->PropertyArrayMapConstant(), effect, control);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForFixedArrayLength()),
new_properties, jsgraph()->Constant(new_length), effect, control);
@@ -2354,107 +2323,30 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
return graph()->NewNode(common()->FinishRegion(), new_properties, effect);
}
-void JSNativeContextSpecialization::AssumePrototypesStable(
- MapHandles const& receiver_maps, Handle<JSObject> holder) {
- // Determine actual holder and perform prototype chain checks.
- for (auto map : receiver_maps) {
- // Perform the implicit ToObject for primitives here.
- // Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(map, native_context())
- .ToHandle(&constructor)) {
- map = handle(constructor->initial_map(), isolate());
- }
- dependencies()->AssumePrototypeMapsStable(map, holder);
- }
-}
-
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
MapHandles const& receiver_maps) {
- // Check if the array prototype chain is intact.
- if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
-
- // Make sure both the initial Array and Object prototypes are stable.
- Handle<JSObject> initial_array_prototype(
- native_context()->initial_array_prototype(), isolate());
- Handle<JSObject> initial_object_prototype(
- native_context()->initial_object_prototype(), isolate());
- if (!initial_array_prototype->map()->is_stable() ||
- !initial_object_prototype->map()->is_stable()) {
- return false;
- }
-
- // Check if all {receiver_maps} either have the initial Array.prototype
- // or the initial Object.prototype as their prototype, as those are
- // guarded by the array protector cell.
- for (Handle<Map> map : receiver_maps) {
- if (map->prototype() != *initial_array_prototype &&
- map->prototype() != *initial_object_prototype) {
+  // Check if all {receiver_maps} have one of the initial Array.prototype
+ // or Object.prototype objects as their prototype (in any of the current
+ // native contexts, as the global Array protector works isolate-wide).
+ for (Handle<Map> receiver_map : receiver_maps) {
+ DisallowHeapAllocation no_gc;
+ Object* const receiver_prototype = receiver_map->prototype();
+ if (!isolate()->IsInAnyContext(receiver_prototype,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX) &&
+ !isolate()->IsInAnyContext(receiver_prototype,
+ Context::INITIAL_OBJECT_PROTOTYPE_INDEX)) {
return false;
}
}
- // Install code dependencies on the prototype maps.
- for (Handle<Map> map : receiver_maps) {
- dependencies()->AssumePrototypeMapsStable(map, initial_object_prototype);
- }
+ // Check if the array prototype chain is intact.
+ if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
// Install code dependency on the array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
return true;
}
-JSNativeContextSpecialization::InferHasInPrototypeChainResult
-JSNativeContextSpecialization::InferHasInPrototypeChain(
- Node* receiver, Node* effect, Handle<JSReceiver> prototype) {
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
-
- // Check if either all or none of the {receiver_maps} have the given
- // {prototype} in their prototype chain.
- bool all = true;
- bool none = true;
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- Handle<Map> receiver_map = receiver_maps[i];
- if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
- return kMayBeInPrototypeChain;
- }
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- // In case of an unreliable {result} we need to ensure that all
- // {receiver_maps} are stable, because otherwise we cannot trust
- // the {receiver_maps} information, since arbitrary side-effects
- // may have happened.
- if (!receiver_map->is_stable()) {
- return kMayBeInPrototypeChain;
- }
- }
- for (PrototypeIterator j(receiver_map);; j.Advance()) {
- if (j.IsAtEnd()) {
- all = false;
- break;
- }
- Handle<JSReceiver> const current =
- PrototypeIterator::GetCurrent<JSReceiver>(j);
- if (current.is_identical_to(prototype)) {
- none = false;
- break;
- }
- if (!current->map()->is_stable() ||
- current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
- return kMayBeInPrototypeChain;
- }
- }
- }
- DCHECK_IMPLIES(all, !none);
- DCHECK_IMPLIES(none, !all);
-
- if (all) return kIsInPrototypeChain;
- if (none) return kIsNotInPrototypeChain;
- return kMayBeInPrototypeChain;
-}
-
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
MapHandles* receiver_maps) {
@@ -2558,10 +2450,6 @@ Factory* JSNativeContextSpecialization::factory() const {
return isolate()->factory();
}
-MachineOperatorBuilder* JSNativeContextSpecialization::machine() const {
- return jsgraph()->machine();
-}
-
CommonOperatorBuilder* JSNativeContextSpecialization::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 2f9df08f81..a9b04a3e08 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -50,12 +50,18 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
CompilationDependencies* dependencies,
Zone* zone);
+ const char* reducer_name() const override {
+ return "JSNativeContextSpecialization";
+ }
+
Reduction Reduce(Node* node) final;
private:
Reduction ReduceJSAdd(Node* node);
+ Reduction ReduceJSStringConcat(Node* node);
Reduction ReduceJSGetSuperConstructor(Node* node);
Reduction ReduceJSInstanceOf(Node* node);
+ Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadGlobal(Node* node);
@@ -96,6 +102,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// A triple of nodes that represents a continuation.
class ValueEffectControl final {
public:
+ ValueEffectControl()
+ : value_(nullptr), effect_(nullptr), control_(nullptr) {}
ValueEffectControl(Node* value, Node* effect, Node* control)
: value_(value), effect_(effect), control_(control) {}
@@ -104,19 +112,45 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Node* control() const { return control_; }
private:
- Node* const value_;
- Node* const effect_;
- Node* const control_;
+ Node* value_;
+ Node* effect_;
+ Node* control_;
};
// Construct the appropriate subgraph for property access.
- ValueEffectControl BuildPropertyAccess(Node* receiver, Node* value,
- Node* context, Node* frame_state,
- Node* effect, Node* control,
- Handle<Name> name,
- PropertyAccessInfo const& access_info,
- AccessMode access_mode,
- LanguageMode language_mode);
+ ValueEffectControl BuildPropertyAccess(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node* effect, Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode, LanguageMode language_mode);
+ ValueEffectControl BuildPropertyLoad(Node* receiver, Node* context,
+ Node* frame_state, Node* effect,
+ Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info,
+ LanguageMode language_mode);
+
+ ValueEffectControl BuildPropertyStore(
+ Node* receiver, Node* value, Node* context, Node* frame_state,
+ Node* effect, Node* control, Handle<Name> name,
+ ZoneVector<Node*>* if_exceptions, PropertyAccessInfo const& access_info,
+ AccessMode access_mode, LanguageMode language_mode);
+
+ // Helpers for accessor inlining.
+ Node* InlinePropertyGetterCall(Node* receiver, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info);
+ Node* InlinePropertySetterCall(Node* receiver, Node* value, Node* context,
+ Node* frame_state, Node** effect,
+ Node** control,
+ ZoneVector<Node*>* if_exceptions,
+ PropertyAccessInfo const& access_info);
+ Node* InlineApiCall(Node* receiver, Node* holder, Node* context, Node* target,
+ Node* frame_state, Node* value, Node** effect,
+ Node** control, Handle<SharedFunctionInfo> shared_info,
+ Handle<FunctionTemplateInfo> function_template_info);
// Construct the appropriate subgraph for element access.
ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -126,38 +160,15 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessStoreMode store_mode);
- // Construct an appropriate heap object check.
- Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
-
- // Construct an appropriate map check.
- Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
- MapHandles const& maps);
-
// Construct appropriate subgraph to extend properties backing store.
Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
Node* effect, Node* control);
- // Adds stability dependencies on all prototypes of every class in
- // {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(MapHandles const& receiver_maps,
- Handle<JSObject> holder);
-
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
bool CanTreatHoleAsUndefined(MapHandles const& receiver_maps);
- // Checks if we know at compile time that the {receiver} either definitely
- // has the {prototype} in it's prototype chain, or the {receiver} definitely
- // doesn't have the {prototype} in it's prototype chain.
- enum InferHasInPrototypeChainResult {
- kIsInPrototypeChain,
- kIsNotInPrototypeChain,
- kMayBeInPrototypeChain
- };
- InferHasInPrototypeChainResult InferHasInPrototypeChain(
- Node* receiver, Node* effect, Handle<JSReceiver> prototype);
-
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
bool ExtractReceiverMaps(Node* receiver, Node* effect,
@@ -174,11 +185,16 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
- ValueEffectControl InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state,
- Node* parameter, Node* effect, Node* control,
- Handle<SharedFunctionInfo> shared_info,
- Handle<FunctionTemplateInfo> function_template_info);
+ // Checks if we know at compile time that the {receiver} either definitely
+  // has the {prototype} in its prototype chain, or the {receiver} definitely
+  // doesn't have the {prototype} in its prototype chain.
+ enum InferHasInPrototypeChainResult {
+ kIsInPrototypeChain,
+ kIsNotInPrototypeChain,
+ kMayBeInPrototypeChain
+ };
+ InferHasInPrototypeChainResult InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<HeapObject> prototype);
// Script context lookup logic.
struct ScriptContextTableLookupResult;
@@ -192,7 +208,6 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
- MachineOperatorBuilder* machine() const;
Flags flags() const { return flags_; }
Handle<JSGlobalObject> global_object() const { return global_object_; }
Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
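Illustrative sketch (not part of the diff): the relocated InferHasInPrototypeChain helper returns a three-valued answer, and a caller would typically consume it roughly like the hypothetical switch below.

    // Hypothetical consumer of the three-valued result; the enum values and
    // signature come from the header above, the switch body is illustrative.
    switch (InferHasInPrototypeChain(receiver, effect, prototype)) {
      case kIsInPrototypeChain:     // fold the check to the true constant
        break;
      case kIsNotInPrototypeChain:  // fold the check to the false constant
        break;
      case kMayBeInPrototypeChain:  // keep the dynamic JSHasInPrototypeChain
        break;
    }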
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index b8156a23f4..ff025d25fd 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -22,6 +22,12 @@ std::ostream& operator<<(std::ostream& os, CallFrequency f) {
return os << f.value();
}
+CallFrequency CallFrequencyOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ op->opcode() == IrOpcode::kJSConstructWithArrayLike);
+ return OpParameter<CallFrequency>(op);
+}
+
VectorSlotPair::VectorSlotPair() {}
@@ -116,10 +122,31 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const* op) {
return OpParameter<SpreadWithArityParameter>(op);
}
+bool operator==(StringConcatParameter const& lhs,
+ StringConcatParameter const& rhs) {
+ return lhs.operand_count() == rhs.operand_count();
+}
+
+bool operator!=(StringConcatParameter const& lhs,
+ StringConcatParameter const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StringConcatParameter const& p) {
+ return base::hash_combine(p.operand_count());
+}
+
+std::ostream& operator<<(std::ostream& os, StringConcatParameter const& p) {
+ return os << p.operand_count();
+}
+
+StringConcatParameter const& StringConcatParameterOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSStringConcat);
+ return OpParameter<StringConcatParameter>(op);
+}
+
std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
- os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
- << p.tail_call_mode();
- return os;
+ return os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode();
}
const CallParameters& CallParametersOf(const Operator* op) {
@@ -129,8 +156,7 @@ const CallParameters& CallParametersOf(const Operator* op) {
std::ostream& operator<<(std::ostream& os,
CallForwardVarargsParameters const& p) {
- return os << p.arity() << ", " << p.start_index() << ", "
- << p.tail_call_mode();
+ return os << p.arity() << ", " << p.start_index();
}
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
@@ -533,31 +559,6 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
-bool operator==(GeneratorStoreParameters const& lhs,
- GeneratorStoreParameters const& rhs) {
- return lhs.register_count() == rhs.register_count() &&
- lhs.suspend_type() == rhs.suspend_type();
-}
-bool operator!=(GeneratorStoreParameters const& lhs,
- GeneratorStoreParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-size_t hash_value(GeneratorStoreParameters const& p) {
- return base::hash_combine(p.register_count(),
- static_cast<int>(p.suspend_type()));
-}
-
-std::ostream& operator<<(std::ostream& os, GeneratorStoreParameters const& p) {
- const char* suspend_type = SuspendTypeFor(p.suspend_type());
- return os << p.register_count() << " (" << suspend_type << ")";
-}
-
-const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op) {
- DCHECK_EQ(op->opcode(), IrOpcode::kJSGeneratorStore);
- return OpParameter<GeneratorStoreParameters>(op);
-}
-
BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
return OpParameter<BinaryOperationHint>(op);
@@ -590,12 +591,14 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToObject, Operator::kFoldable, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
+ V(ToPrimitiveToString, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kNoProperties, 2, 1) \
V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(ClassOf, Operator::kPure, 1, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
+ V(HasInPrototypeChain, Operator::kNoProperties, 2, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
V(ForInNext, Operator::kNoProperties, 4, 1) \
@@ -643,8 +646,11 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kSignedSmall> \
k##Name##SignedSmallOperator; \
Name##Operator<BinaryOperationHint::kSigned32> k##Name##Signed32Operator; \
+ Name##Operator<BinaryOperationHint::kNumber> k##Name##NumberOperator; \
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
+ Name##Operator<BinaryOperationHint::kNonEmptyString> \
+ k##Name##NonEmptyStringOperator; \
Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
@@ -667,6 +673,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<CompareOperationHint::kInternalizedString> \
k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
@@ -695,8 +702,12 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##SignedSmallOperator; \
case BinaryOperationHint::kSigned32: \
return &cache_.k##Name##Signed32Operator; \
+ case BinaryOperationHint::kNumber: \
+ return &cache_.k##Name##NumberOperator; \
case BinaryOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
+ case BinaryOperationHint::kNonEmptyString: \
+ return &cache_.k##Name##NonEmptyStringOperator; \
case BinaryOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
case BinaryOperationHint::kAny: \
@@ -723,6 +734,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##InternalizedStringOperator; \
case CompareOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case CompareOperationHint::kSymbol: \
+ return &cache_.k##Name##SymbolOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
@@ -734,6 +747,15 @@ BINARY_OP_LIST(BINARY_OP)
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
+const Operator* JSOperatorBuilder::StringConcat(int operand_count) {
+ StringConcatParameter parameters(operand_count);
+ return new (zone()) Operator1<StringConcatParameter>( // --
+ IrOpcode::kJSStringConcat, Operator::kNoProperties, // opcode
+ "JSStringConcat", // name
+ operand_count, 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
const VectorSlotPair& feedback) {
FeedbackParameter parameters(feedback);
@@ -754,9 +776,9 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
hints); // parameter
}
-const Operator* JSOperatorBuilder::CallForwardVarargs(
- size_t arity, uint32_t start_index, TailCallMode tail_call_mode) {
- CallForwardVarargsParameters parameters(arity, start_index, tail_call_mode);
+const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
+ uint32_t start_index) {
+ CallForwardVarargsParameters parameters(arity, start_index);
return new (zone()) Operator1<CallForwardVarargsParameters>( // --
IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties, // opcode
"JSCallForwardVarargs", // name
@@ -766,10 +788,8 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(
const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode,
- TailCallMode tail_call_mode) {
- CallParameters parameters(arity, frequency, feedback, tail_call_mode,
- convert_mode);
+ ConvertReceiverMode convert_mode) {
+ CallParameters parameters(arity, frequency, feedback, convert_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCall, Operator::kNoProperties, // opcode
"JSCall", // name
@@ -777,6 +797,14 @@ const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
+ return new (zone()) Operator1<CallFrequency>( // --
+ IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode
+ "JSCallWithArrayLike", // name
+ 3, 1, 1, 1, 1, 2, // counts
+ frequency); // parameter
+}
+
const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
SpreadWithArityParameter parameters(arity);
return new (zone()) Operator1<SpreadWithArityParameter>( // --
@@ -831,6 +859,16 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
parameters); // parameter
}
+const Operator* JSOperatorBuilder::ConstructWithArrayLike(
+ CallFrequency frequency) {
+ return new (zone()) Operator1<CallFrequency>( // --
+ IrOpcode::kJSConstructWithArrayLike, // opcode
+ Operator::kNoProperties, // properties
+ "JSConstructWithArrayLike", // name
+ 3, 1, 1, 1, 1, 2, // counts
+ frequency); // parameter
+}
+
const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
SpreadWithArityParameter parameters(arity);
return new (zone()) Operator1<SpreadWithArityParameter>( // --
@@ -869,14 +907,12 @@ const Operator* JSOperatorBuilder::LoadProperty(
access); // parameter
}
-const Operator* JSOperatorBuilder::GeneratorStore(int register_count,
- SuspendFlags suspend_flags) {
- GeneratorStoreParameters parameters(register_count, suspend_flags);
- return new (zone()) Operator1<GeneratorStoreParameters>( // --
- IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
- "JSGeneratorStore", // name
- 3 + register_count, 1, 1, 0, 1, 0, // counts
- parameters); // parameter
+const Operator* JSOperatorBuilder::GeneratorStore(int register_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kJSGeneratorStore, Operator::kNoThrow, // opcode
+ "JSGeneratorStore", // name
+ 3 + register_count, 1, 1, 0, 1, 0, // counts
+ register_count); // parameter
}
const Operator* JSOperatorBuilder::GeneratorRestoreRegister(int index) {
@@ -1005,7 +1041,6 @@ const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
type); // parameter
}
-
const Operator* JSOperatorBuilder::CreateArray(size_t arity,
Handle<AllocationSite> site) {
// constructor, new_target, arg1, ..., argN
@@ -1022,11 +1057,11 @@ const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, feedback, pretenure);
- return new (zone()) Operator1<CreateClosureParameters>( // --
- IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
- "JSCreateClosure", // name
- 0, 1, 1, 1, 1, 0, // counts
- parameters); // parameter
+ return new (zone()) Operator1<CreateClosureParameters>( // --
+ IrOpcode::kJSCreateClosure, Operator::kEliminatable, // opcode
+ "JSCreateClosure", // name
+ 0, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
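Illustrative sketch (not taken from the diff): the new JSStringConcat operator follows the usual parameterized-operator pattern, so the operand count stored by the builder can later be read back from the operator. Assuming a JSOperatorBuilder* named javascript, both calls are defined in the diff above:

    // Build a JSStringConcat operator for three operands and recover the
    // operand count from its StringConcatParameter.
    const Operator* op = javascript->StringConcat(3);     // 3 operands
    int n = StringConcatParameterOf(op).operand_count();  // n == 3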
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 5ac3b6769e..4c9f815cd1 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -57,6 +57,8 @@ class CallFrequency final {
std::ostream& operator<<(std::ostream&, CallFrequency);
+CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
+
// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
// is used to access the type feedback for a certain {Node}.
class V8_EXPORT_PRIVATE VectorSlotPair {
@@ -180,17 +182,12 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const*);
// is used as parameter by JSCallForwardVarargs operators.
class CallForwardVarargsParameters final {
public:
- CallForwardVarargsParameters(size_t arity, uint32_t start_index,
- TailCallMode tail_call_mode)
+ CallForwardVarargsParameters(size_t arity, uint32_t start_index)
: bit_field_(ArityField::encode(arity) |
- StartIndexField::encode(start_index) |
- TailCallModeField::encode(tail_call_mode)) {}
+ StartIndexField::encode(start_index)) {}
size_t arity() const { return ArityField::decode(bit_field_); }
uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
bool operator==(CallForwardVarargsParameters const& that) const {
return this->bit_field_ == that.bit_field_;
@@ -206,7 +203,6 @@ class CallForwardVarargsParameters final {
typedef BitField<size_t, 0, 15> ArityField;
typedef BitField<uint32_t, 15, 15> StartIndexField;
- typedef BitField<TailCallMode, 30, 1> TailCallModeField;
uint32_t const bit_field_;
};
@@ -221,11 +217,10 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency frequency,
- VectorSlotPair const& feedback, TailCallMode tail_call_mode,
+ VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
- ConvertReceiverModeField::encode(convert_mode) |
- TailCallModeField::encode(tail_call_mode)),
+ ConvertReceiverModeField::encode(convert_mode)),
frequency_(frequency),
feedback_(feedback) {}
@@ -234,9 +229,6 @@ class CallParameters final {
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
- TailCallMode tail_call_mode() const {
- return TailCallModeField::decode(bit_field_);
- }
VectorSlotPair const& feedback() const { return feedback_; }
bool operator==(CallParameters const& that) const {
@@ -253,7 +245,6 @@ class CallParameters final {
typedef BitField<size_t, 0, 29> ArityField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
- typedef BitField<TailCallMode, 31, 1> TailCallModeField;
uint32_t const bit_field_;
CallFrequency const frequency_;
@@ -619,32 +610,26 @@ std::ostream& operator<<(std::ostream&, CreateLiteralParameters const&);
const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op);
-class GeneratorStoreParameters final {
+// Defines the number of operands passed to a JSStringConcat operator.
+class StringConcatParameter final {
public:
- GeneratorStoreParameters(int register_count, SuspendFlags flags)
- : register_count_(register_count), suspend_flags_(flags) {}
+ explicit StringConcatParameter(int operand_count)
+ : operand_count_(operand_count) {}
- int register_count() const { return register_count_; }
- SuspendFlags suspend_flags() const { return suspend_flags_; }
- SuspendFlags suspend_type() const {
- return suspend_flags_ & SuspendFlags::kSuspendTypeMask;
- }
+ int operand_count() const { return operand_count_; }
private:
- int register_count_;
- SuspendFlags suspend_flags_;
+ uint32_t const operand_count_;
};
-bool operator==(GeneratorStoreParameters const&,
- GeneratorStoreParameters const&);
-bool operator!=(GeneratorStoreParameters const&,
- GeneratorStoreParameters const&);
+bool operator==(StringConcatParameter const&, StringConcatParameter const&);
+bool operator!=(StringConcatParameter const&, StringConcatParameter const&);
-size_t hash_value(GeneratorStoreParameters const&);
+size_t hash_value(StringConcatParameter const&);
-std::ostream& operator<<(std::ostream&, GeneratorStoreParameters const&);
+std::ostream& operator<<(std::ostream&, StringConcatParameter const&);
-const GeneratorStoreParameters& GeneratorStoreParametersOf(const Operator* op);
+StringConcatParameter const& StringConcatParameterOf(Operator const*);
BinaryOperationHint BinaryOperationHintOf(const Operator* op);
@@ -684,6 +669,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ToNumber();
const Operator* ToObject();
const Operator* ToString();
+ const Operator* ToPrimitiveToString();
const Operator* Create();
const Operator* CreateArguments(CreateArgumentsType type);
@@ -702,13 +688,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
- const Operator* CallForwardVarargs(size_t arity, uint32_t start_index,
- TailCallMode tail_call_mode);
+ const Operator* CallForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Call(
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
- TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny);
+ const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(uint32_t arity);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
@@ -718,6 +703,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Construct(uint32_t arity,
CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
+ const Operator* ConstructWithArrayLike(CallFrequency frequency);
const Operator* ConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -757,6 +743,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ClassOf();
const Operator* TypeOf();
+ const Operator* HasInPrototypeChain();
const Operator* InstanceOf();
const Operator* OrdinaryHasInstance();
@@ -766,12 +753,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadMessage();
const Operator* StoreMessage();
+ const Operator* StringConcat(int operand_count);
+
// Used to implement Ignition's SuspendGenerator bytecode.
- const Operator* GeneratorStore(int register_count,
- SuspendFlags suspend_flags);
+ const Operator* GeneratorStore(int register_count);
- // Used to implement Ignition's ResumeGenerator bytecode.
+ // Used to implement Ignition's RestoreGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
+ // Used to implement Ignition's RestoreGeneratorRegisters bytecode.
const Operator* GeneratorRestoreRegister(int index);
const Operator* StackCheck();
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 7c70b1ea11..3398a33036 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -25,11 +25,15 @@ bool BinaryOperationHintToNumberOperationHint(
case BinaryOperationHint::kSigned32:
*number_hint = NumberOperationHint::kSigned32;
return true;
+ case BinaryOperationHint::kNumber:
+ *number_hint = NumberOperationHint::kNumber;
+ return true;
case BinaryOperationHint::kNumberOrOddball:
*number_hint = NumberOperationHint::kNumberOrOddball;
return true;
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kNonEmptyString:
case BinaryOperationHint::kString:
break;
}
@@ -82,6 +86,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -117,7 +122,6 @@ class JSSpeculativeBinopBuilder final {
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
@@ -138,7 +142,6 @@ class JSSpeculativeBinopBuilder final {
break;
}
UNREACHABLE();
- return nullptr;
}
Node* BuildSpeculativeOperation(const Operator* op) {
@@ -254,6 +257,53 @@ Reduction JSTypeHintLowering::ReduceToNumberOperation(Node* input, Node* effect,
return Reduction();
}
+Reduction JSTypeHintLowering::ReduceToPrimitiveToStringOperation(
+ Node* input, Node* effect, Node* control, FeedbackSlot slot) const {
+ DCHECK(!slot.IsInvalid());
+ BinaryOpICNexus nexus(feedback_vector(), slot);
+ BinaryOperationHint hint = nexus.GetBinaryOperationFeedback();
+ if (hint == BinaryOperationHint::kNonEmptyString) {
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->CheckNonEmptyString(), input, effect, control);
+ return Reduction(node);
+ } else if (hint == BinaryOperationHint::kString) {
+ Node* node = jsgraph()->graph()->NewNode(
+ jsgraph()->simplified()->CheckString(), input, effect, control);
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceCallOperation(const Operator* op,
+ Node* const* args,
+ int arg_count, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSCall, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ CallICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForCall)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
+Reduction JSTypeHintLowering::ReduceConstructOperation(
+ const Operator* op, Node* const* args, int arg_count, Node* effect,
+ Node* control, FeedbackSlot slot) const {
+ DCHECK_EQ(IrOpcode::kJSConstruct, op->opcode());
+ DCHECK(!slot.IsInvalid());
+ CallICNexus nexus(feedback_vector(), slot);
+ if (Node* node = TryBuildSoftDeopt(
+ nexus, effect, control,
+ DeoptimizeReason::kInsufficientTypeFeedbackForConstruct)) {
+ return Reduction(node);
+ }
+ return Reduction();
+}
+
Reduction JSTypeHintLowering::ReduceLoadNamedOperation(
const Operator* op, Node* obj, Node* effect, Node* control,
FeedbackSlot slot) const {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7bd237814d..50779c9f3c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -59,6 +59,21 @@ class JSTypeHintLowering {
Reduction ReduceToNumberOperation(Node* value, Node* effect, Node* control,
FeedbackSlot slot) const;
+  // Potential reduction of ToPrimitiveToString operations.
+ Reduction ReduceToPrimitiveToStringOperation(Node* value, Node* effect,
+ Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction of call operations.
+ Reduction ReduceCallOperation(const Operator* op, Node* const* args,
+ int arg_count, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
+ // Potential reduction of construct operations.
+ Reduction ReduceConstructOperation(const Operator* op, Node* const* args,
+ int arg_count, Node* effect, Node* control,
+ FeedbackSlot slot) const;
+
// Potential reduction of property access operations.
Reduction ReduceLoadNamedOperation(const Operator* op, Node* obj,
Node* effect, Node* control,
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 64838a1f83..243a80a645 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -22,6 +22,29 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
+bool WillCreateConsString(HeapObjectMatcher left, HeapObjectMatcher right) {
+ if (right.HasValue() && right.Value()->IsString()) {
+ Handle<String> right_string = Handle<String>::cast(right.Value());
+ if (right_string->length() >= ConsString::kMinLength) return true;
+ }
+ if (left.HasValue() && left.Value()->IsString()) {
+ Handle<String> left_string = Handle<String>::cast(left.Value());
+ if (left_string->length() >= ConsString::kMinLength) {
+ // The invariant for ConsString requires the left hand side to be
+ // a sequential or external string if the right hand side is the
+ // empty string. Since we don't know anything about the right hand
+ // side here, we must ensure that the left hand side satisfies the
+ // constraints independent of the right hand side.
+ return left_string->IsSeqString() || left_string->IsExternalString();
+ }
+ }
+ return false;
+}
+
+} // namespace
+
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
// dependencies during lowering of a binop and contains numerous helper
@@ -47,6 +70,7 @@ class JSBinopReduction final {
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
+ case CompareOperationHint::kSymbol:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -85,6 +109,16 @@ class JSBinopReduction final {
return false;
}
+ bool IsSymbolCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kSymbol) &&
+ BothInputsMaybe(Type::Symbol());
+ }
+ return false;
+ }
+
// Check if a string addition will definitely result in creating a ConsString,
// i.e. if the combined length of the resulting string exceeds the ConsString
// minimum length.
@@ -95,21 +129,7 @@ class JSBinopReduction final {
((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
HeapObjectBinopMatcher m(node_);
- if (m.right().HasValue() && m.right().Value()->IsString()) {
- Handle<String> right_string = Handle<String>::cast(m.right().Value());
- if (right_string->length() >= ConsString::kMinLength) return true;
- }
- if (m.left().HasValue() && m.left().Value()->IsString()) {
- Handle<String> left_string = Handle<String>::cast(m.left().Value());
- if (left_string->length() >= ConsString::kMinLength) {
- // The invariant for ConsString requires the left hand side to be
- // a sequential or external string if the right hand side is the
- // empty string. Since we don't know anything about the right hand
- // side here, we must ensure that the left hand side satisfy the
- // constraints independent of the right hand side.
- return left_string->IsSeqString() || left_string->IsExternalString();
- }
- }
+ return WillCreateConsString(m.left(), m.right());
}
return false;
}
@@ -137,6 +157,24 @@ class JSBinopReduction final {
}
}
+ // Checks that both inputs are Symbol, and if we don't know
+ // statically that one side is already a Symbol, insert a
+ // CheckSymbol node.
+ void CheckInputsToSymbol() {
+ if (!left_type()->Is(Type::Symbol())) {
+ Node* left_input = graph()->NewNode(simplified()->CheckSymbol(), left(),
+ effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+ if (!right_type()->Is(Type::Symbol())) {
+ Node* right_input = graph()->NewNode(simplified()->CheckSymbol(), right(),
+ effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
// Checks that both inputs are String, and if we don't know
// statically that one side is already a String, insert a
// CheckString node.
@@ -307,7 +345,6 @@ class JSBinopReduction final {
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* NumberOpFromSpeculativeNumberOp() {
@@ -332,7 +369,6 @@ class JSBinopReduction final {
break;
}
UNREACHABLE();
- return nullptr;
}
bool LeftInputIs(Type* t) { return left_type()->Is(t); }
@@ -488,11 +524,9 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- empty_string_type_(
- Type::HeapConstant(factory()->empty_string(), graph()->zone())),
pointer_comparable_type_(
Type::Union(Type::Oddball(),
- Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
+ Type::Union(Type::SymbolOrReceiver(), Type::EmptyString(),
graph()->zone()),
graph()->zone())),
type_cache_(TypeCache::Get()) {
@@ -506,7 +540,8 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
JSBinopReduction r(this, node);
NumberOperationHint hint = NumberOperationHintOf(node->op());
- if (hint == NumberOperationHint::kNumberOrOddball &&
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::PlainPrimitive()) &&
r.NeitherInputCanBe(Type::StringOrReceiver())) {
// SpeculativeNumberAdd(x:-string, y:-string) =>
@@ -540,12 +575,12 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (r.LeftInputIs(empty_string_type_)) {
+ if (r.LeftInputIs(Type::EmptyString())) {
Node* value = effect = graph()->NewNode(simplified()->CheckString(),
r.right(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
- } else if (r.RightInputIs(empty_string_type_)) {
+ } else if (r.RightInputIs(Type::EmptyString())) {
Node* value = effect = graph()->NewNode(simplified()->CheckString(),
r.left(), effect, control);
ReplaceWithValue(node, value, effect, control);
@@ -594,7 +629,8 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
JSBinopReduction r(this, node);
NumberOperationHint hint = NumberOperationHintOf(node->op());
- if (hint == NumberOperationHint::kNumberOrOddball &&
+ if ((hint == NumberOperationHint::kNumber ||
+ hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::NumberOrOddball())) {
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
@@ -651,26 +687,9 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
second_type = NodeProperties::GetType(second);
}
- // Determine the {first} length.
- HeapObjectBinopMatcher m(node);
- Node* first_length =
- (m.left().HasValue() && m.left().Value()->IsString())
- ? jsgraph()->Constant(
- Handle<String>::cast(m.left().Value())->length())
- : effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- first, effect, control);
-
- // Determine the {second} length.
- Node* second_length =
- (m.right().HasValue() && m.right().Value()->IsString())
- ? jsgraph()->Constant(
- Handle<String>::cast(m.right().Value())->length())
- : effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- second, effect, control);
-
// Compute the resulting length.
+ Node* first_length = BuildGetStringLength(first, &effect, control);
+ Node* second_length = BuildGetStringLength(second, &effect, control);
Node* length =
graph()->NewNode(simplified()->NumberAdd(), first_length, second_length);
@@ -689,35 +708,175 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
} else {
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
{
- // Throw a RangeError in case of overflow.
- Node* vfalse = efalse = if_false = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kThrowInvalidStringLength),
- context, frame_state, efalse, if_false);
-
- // Update potential {IfException} uses of {node} to point to the
- // %ThrowInvalidStringLength runtime call node instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse);
- NodeProperties::ReplaceEffectInput(on_exception, efalse);
- if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
- Revisit(on_exception);
- }
-
- // The above %ThrowInvalidStringLength runtime call is an unconditional
- // throw, making it impossible to return a successful completion in this
- // case. We simply connect the successful completion to the graph end.
- if_false = graph()->NewNode(common()->Throw(), efalse, if_false);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), if_false);
- Revisit(graph()->end());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ BuildThrowStringRangeError(node, context, frame_state, effect, if_false);
}
control = graph()->NewNode(common()->IfTrue(), branch);
}
+ Node* result = effect =
+ BuildCreateConsString(first, second, length, effect, control);
+ ReplaceWithValue(node, result, effect, control);
+ return Replace(result);
+}
+
+namespace {
+
+// Check if a string concatenation will definitely result in creating a
+// ConsString for all operands, i.e. if the combined length of the first two
+// operands exceeds the ConsString minimum length and we never concatenate the
+// empty string.
+bool ShouldConcatenateAsConsStrings(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStringConcat, node->opcode());
+ DCHECK_GE(StringConcatParameterOf(node->op()).operand_count(), 3);
+
+ // Check that the concatenation of the first two strings results in a cons
+ // string.
+ HeapObjectMatcher first_matcher(NodeProperties::GetValueInput(node, 0));
+ HeapObjectMatcher second_matcher(NodeProperties::GetValueInput(node, 1));
+ if (!WillCreateConsString(first_matcher, second_matcher)) return false;
+
+ // Now check that all other RHSs of the ConsStrings will be non-empty.
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+ for (int i = 2; i < operand_count; ++i) {
+ Node* operand = NodeProperties::GetValueInput(node, i);
+ DCHECK(NodeProperties::GetType(operand)->Is(Type::String()));
+ if (!NodeProperties::GetType(operand)->Is(Type::NonEmptyString())) {
+ return false;
+ }
+ }
+
+ // If all these constraints hold, the result will definitely be a ConsString.
+ return true;
+}
+
+} // namespace
+
+Reduction JSTypedLowering::ReduceJSStringConcat(Node* node) {
+ if (ShouldConcatenateAsConsStrings(node)) {
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ int operand_count = StringConcatParameterOf(node->op()).operand_count();
+
+ // Set up string overflow check dependencies.
+ NodeVector overflow_controls(graph()->zone());
+ NodeVector overflow_effects(graph()->zone());
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // Add a code dependency on the string length overflow protector.
+ dependencies()->AssumePropertyCell(factory()->string_length_protector());
+ }
+
+ // Get the first operand and its length.
+ Node* current_result = NodeProperties::GetValueInput(node, 0);
+ Node* current_length =
+ BuildGetStringLength(current_result, &effect, control);
+
+ for (int i = 1; i < operand_count; ++i) {
+ bool last_operand = i == operand_count - 1;
+ // Get the next operand and its length.
+ Node* current_operand = NodeProperties::GetValueInput(node, i);
+ HeapObjectMatcher m(current_operand);
+ Node* operand_length =
+ BuildGetStringLength(current_operand, &effect, control);
+
+      // Update the current length and check that it doesn't overflow.
+ current_length = graph()->NewNode(simplified()->NumberAdd(),
+ current_length, operand_length);
+ Node* check = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ current_length,
+ jsgraph()->Constant(String::kMaxLength));
+ if (isolate()->IsStringLengthOverflowIntact()) {
+ // We can just deoptimize if the {check} fails. Besides generating a
+ // shorter code sequence than the version below, this has the additional
+ // benefit of not holding on to the lazy {frame_state} and thus
+ // potentially reduces the number of live ranges and allows for more
+ // truncations.
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ } else {
+ // Otherwise insert a branch to the runtime call which throws on
+ // overflow.
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
+ overflow_controls.push_back(
+ graph()->NewNode(common()->IfFalse(), branch));
+ overflow_effects.push_back(effect);
+
+ // Build the string overflow throwing code if we have checked all the
+ // lengths.
+ if (last_operand) {
+ // Merge control and effect of overflow checks.
+ int merge_count = operand_count - 1;
+ DCHECK_EQ(overflow_controls.size(), static_cast<size_t>(merge_count));
+ DCHECK_EQ(overflow_effects.size(), static_cast<size_t>(merge_count));
+
+ Node* if_false =
+ graph()->NewNode(common()->Merge(merge_count), merge_count,
+ &overflow_controls.front());
+ overflow_effects.push_back(if_false);
+ Node* efalse =
+ graph()->NewNode(common()->EffectPhi(merge_count),
+ merge_count + 1, &overflow_effects.front());
+
+ // And throw the range error.
+ BuildThrowStringRangeError(node, context, frame_state, efalse,
+ if_false);
+ }
+ control = graph()->NewNode(common()->IfTrue(), branch);
+ }
+ current_result = effect = BuildCreateConsString(
+ current_result, current_operand, current_length, effect, control);
+ }
+ ReplaceWithValue(node, current_result, effect, control);
+ return Replace(current_result);
+ }
+ return NoChange();
+}
+
+Node* JSTypedLowering::BuildGetStringLength(Node* value, Node** effect,
+ Node* control) {
+ HeapObjectMatcher m(value);
+ Node* length =
+ (m.HasValue() && m.Value()->IsString())
+ ? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
+ : (*effect) = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForStringLength()),
+ value, *effect, control);
+ return length;
+}
+
+void JSTypedLowering::BuildThrowStringRangeError(Node* node, Node* context,
+ Node* frame_state,
+ Node* effect, Node* control) {
+ // Throw a RangeError in case of overflow.
+ Node* value = effect = control = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowInvalidStringLength), context,
+ frame_state, effect, control);
+
+ // Update potential {IfException} uses of {node} to point to the
+ // %ThrowInvalidStringLength runtime call node instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, value);
+ NodeProperties::ReplaceEffectInput(on_exception, effect);
+ control = graph()->NewNode(common()->IfSuccess(), value);
+ Revisit(on_exception);
+ }
+
+ // The above %ThrowInvalidStringLength runtime call is an unconditional
+ // throw, making it impossible to return a successful completion in this
+ // case. We simply connect the successful completion to the graph end.
+ control = graph()->NewNode(common()->Throw(), effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), control);
+ Revisit(graph()->end());
+}
+
+Node* JSTypedLowering::BuildCreateConsString(Node* first, Node* second,
+ Node* length, Node* effect,
+ Node* control) {
// Figure out the map for the resulting ConsString.
// TODO(turbofan): We currently just use the cons_string_map here for
// the sake of simplicity; we could also try to be smarter here and
@@ -746,13 +905,8 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
simplified()->StoreField(AccessBuilder::ForConsStringSecond()), value,
second, effect, control);
- // Morph the {node} into a {FinishRegion}.
- ReplaceWithValue(node, node, node, control);
- node->ReplaceInput(0, value);
- node->ReplaceInput(1, effect);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->FinishRegion());
- return Changed(node);
+ // Return the {FinishRegion} node.
+ return graph()->NewNode(common()->FinishRegion(), value, effect);
}
Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
@@ -761,7 +915,7 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberComparison(Node* node) {
r.BothInputsAre(Type::Unsigned32())) {
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp());
}
- return Changed(node);
+ return NoChange();
}
Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
@@ -896,6 +1050,9 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node) {
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
+ } else if (r.IsSymbolCompareOperation()) {
+ r.CheckInputsToSymbol();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
return NoChange();
}
@@ -953,6 +1110,9 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
} else if (r.IsStringCompareOperation()) {
r.CheckInputsToString();
return r.ChangeToPureOperator(simplified()->StringEqual());
+ } else if (r.IsSymbolCompareOperation()) {
+ r.CheckInputsToSymbol();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual());
}
return NoChange();
}
@@ -1128,6 +1288,7 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
}
Reduction JSTypedLowering::ReduceJSToString(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToString, node->opcode());
// Try to reduce the input first.
Node* const input = node->InputAt(0);
Reduction reduction = ReduceJSToStringInput(input);
@@ -1138,6 +1299,23 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSToPrimitiveToString(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSToPrimitiveToString, node->opcode());
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ Type* input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::Primitive())) {
+ // If node is already a primitive, then reduce to JSToString and try to
+ // reduce that further.
+ NodeProperties::ChangeOp(node, javascript()->ToString());
+ Reduction reduction = ReduceJSToString(node);
+ if (reduction.Changed()) {
+ return reduction;
+ }
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSToObject, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -1151,13 +1329,6 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
return Replace(receiver);
}
- // TODO(bmeurer/mstarzinger): Add support for lowering inside try blocks.
- if (receiver_type->Maybe(Type::NullOrUndefined()) &&
- NodeProperties::IsExceptionalCall(node)) {
- // ToObject throws for null or undefined inputs.
- return NoChange();
- }
-
// Check whether {receiver} is a spec object.
Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), receiver);
Node* branch =
@@ -1172,7 +1343,7 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
Node* rfalse;
{
// Convert {receiver} using the ToObjectStub.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState, node->op()->properties());
@@ -1181,6 +1352,18 @@ Reduction JSTypedLowering::ReduceJSToObject(Node* node) {
receiver, context, frame_state, efalse, if_false);
}
+ // Update potential {IfException} uses of {node} to point to the above
+ // ToObject stub call node instead. Note that the stub can only throw on
+ // receivers that can be null or undefined.
+ Node* on_exception = nullptr;
+ if (receiver_type->Maybe(Type::NullOrUndefined()) &&
+ NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, if_false);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse);
+ if_false = graph()->NewNode(common()->IfSuccess(), if_false);
+ Revisit(on_exception);
+ }
+
control = graph()->NewNode(common()->Merge(2), if_true, if_false);
effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
@@ -1339,6 +1522,146 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
return NoChange();
}
+Reduction JSTypedLowering::ReduceJSHasInPrototypeChain(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSHasInPrototypeChain, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Type* value_type = NodeProperties::GetType(value);
+ Node* prototype = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // If {value} cannot be a receiver, then it cannot have {prototype} in
+  // its prototype chain (all Primitive values have a null prototype).
+ if (value_type->Is(Type::Primitive())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Loop through the {value}s prototype chain looking for the {prototype}.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), value, value, loop);
+ NodeProperties::SetType(vloop, Type::NonInternal());
+
+ // Load the {value} map and instance type.
+ Node* value_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ Node* value_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
+ effect, control);
+
+ // Check if the {value} is a special receiver, because for special
+ // receivers, i.e. proxies or API values that need access checks,
+ // we have to use the %HasInPrototypeChain runtime function instead.
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberLessThanOrEqual(), value_instance_type,
+ jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch1);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+
+ // Check if the {value} is not a receiver at all.
+ Node* check10 =
+ graph()->NewNode(simplified()->NumberLessThan(), value_instance_type,
+ jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+ Node* branch10 =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
+
+ // A primitive value cannot match the {prototype} we're looking for.
+ if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+ vtrue1 = jsgraph()->FalseConstant();
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+ Node* efalse1 = etrue1;
+ Node* vfalse1;
+ {
+ // Slow path, need to call the %HasInPrototypeChain runtime function.
+ vfalse1 = efalse1 = if_false1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), value,
+ prototype, context, frame_state, efalse1, if_false1);
+
+ // Replace any potential {IfException} uses of {node} to catch
+ // exceptions from this %HasInPrototypeChain runtime call instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse1);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ Revisit(on_exception);
+ }
+ }
+
+ // Load the {value} prototype.
+ Node* value_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), value_map,
+ effect, control);
+
+ // Check if we reached the end of {value}s prototype chain.
+ Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
+ value_prototype, jsgraph()->NullConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = effect;
+ Node* vtrue2 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch2);
+
+ // Check if we reached the {prototype}.
+ Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+ value_prototype, prototype);
+ Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
+
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = effect;
+ Node* vtrue3 = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch3);
+
+ // Close the loop.
+ vloop->ReplaceInput(1, value_prototype);
+ eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
+ if_true3, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+ etrue3, efalse1, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vtrue1);
+ node->ReplaceInput(2, vtrue2);
+ node->ReplaceInput(3, vtrue3);
+ node->ReplaceInput(4, vfalse1);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node,
+ common()->Phi(MachineRepresentation::kTagged, 5));
+ return Changed(node);
+}
+
Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
@@ -1534,7 +1857,7 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
{
// Convert {receiver} using the ToObjectStub. The call does not require a
// frame-state in this case, because neither null nor undefined is passed.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, node->op()->properties());
@@ -1592,7 +1915,7 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
{
// Convert {receiver} using the ToObjectStub. The call does not require a
// frame-state in this case, because neither null nor undefined is passed.
- Callable callable = CodeFactory::ToObject(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, node->op()->properties());
@@ -1840,10 +2163,6 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
if (target_type->Is(Type::Function())) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
// Patch {node} to an indirect call via CallFunctionForwardVarargs.
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
@@ -1912,10 +2231,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
Node* new_target = jsgraph()->UndefinedConstant();
Node* argument_count = jsgraph()->Constant(arity);
if (NeedsArgumentAdaptorFrame(shared, arity)) {
@@ -1951,10 +2266,6 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (target_type->Is(Type::Function())) {
// Compute flags for the call.
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
- if (p.tail_call_mode() == TailCallMode::kAllow) {
- flags |= CallDescriptor::kSupportsTailCalls;
- }
-
// Patch {node} to an indirect call via the CallFunction builtin.
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
@@ -1970,9 +2281,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Maybe we did at least learn something about the {receiver}.
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
- node,
- javascript()->Call(p.arity(), p.frequency(), p.feedback(), convert_mode,
- p.tail_call_mode()));
+ node, javascript()->Call(p.arity(), p.frequency(), p.feedback(),
+ convert_mode));
return Changed(node);
}
@@ -2032,7 +2342,8 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
{
// Filter the {key} to check if it's still a valid property of the
// {receiver} (does the ToName conversion implicitly).
- Callable const callable = CodeFactory::ForInFilter(isolate());
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kForInFilter);
CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNeedsFrameState);
@@ -2040,7 +2351,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), key,
receiver, context, frame_state, effect, if_false0);
- // Update potential {IfException} uses of {node} to point to the ahove
+ // Update potential {IfException} uses of {node} to point to the above
// ForInFilter stub call node instead.
Node* if_exception = nullptr;
if (NodeProperties::IsExceptionalCall(node, &if_exception)) {
@@ -2095,21 +2406,19 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- const GeneratorStoreParameters& p = GeneratorStoreParametersOf(node->op());
+ int register_count = OpParameter<int>(node);
FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
FieldAccess input_or_debug_pos_field =
- p.suspend_flags() == SuspendFlags::kAsyncGeneratorAwait
- ? AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos()
- : AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
generator, effect, control);
- for (int i = 0; i < p.register_count(); ++i) {
+ for (int i = 0; i < register_count; ++i) {
Node* value = NodeProperties::GetValueInput(node, 3 + i);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForFixedArraySlot(i)), array,
@@ -2195,6 +2504,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSDivide:
case IrOpcode::kJSModulus:
return ReduceNumberBinop(node);
+ case IrOpcode::kJSHasInPrototypeChain:
+ return ReduceJSHasInPrototypeChain(node);
case IrOpcode::kJSOrdinaryHasInstance:
return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSToBoolean:
@@ -2209,6 +2520,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSToNumber(node);
case IrOpcode::kJSToString:
return ReduceJSToString(node);
+ case IrOpcode::kJSToPrimitiveToString:
+ return ReduceJSToPrimitiveToString(node);
+ case IrOpcode::kJSStringConcat:
+ return ReduceJSStringConcat(node);
case IrOpcode::kJSToObject:
return ReduceJSToObject(node);
case IrOpcode::kJSTypeOf:
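Illustrative sketch (not part of the diff): the new ReduceJSStringConcat lowering folds an n-operand concatenation into a chain of ConsStrings while keeping a running length that is checked against String::kMaxLength after every operand. In plain C++ terms the invariant it enforces looks roughly like this; the operand container and helper names here are hypothetical:

    // Accumulate operand lengths; bail out (deoptimize, or throw a RangeError
    // via %ThrowInvalidStringLength) once the total would exceed kMaxLength.
    int64_t total = length_of(operands[0]);
    for (size_t i = 1; i < operands.size(); ++i) {
      total += length_of(operands[i]);
      if (total > String::kMaxLength) return ThrowInvalidStringLength();
      // result = new ConsString(result, operands[i], total);
    }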
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 0b92a40a5b..b2e2a162ed 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -42,6 +42,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Flags flags, JSGraph* jsgraph, Zone* zone);
~JSTypedLowering() final {}
+ const char* reducer_name() const override { return "JSTypedLowering"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -52,6 +54,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSHasInPrototypeChain(Node* node);
Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
@@ -67,6 +70,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSToPrimitiveToString(Node* node);
+ Reduction ReduceJSStringConcat(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
Reduction ReduceJSConstructForwardVarargs(Node* node);
@@ -92,6 +97,13 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
+ // Helpers for ReduceJSCreateConsString and ReduceJSStringConcat.
+ Node* BuildGetStringLength(Node* value, Node** effect, Node* control);
+ void BuildThrowStringRangeError(Node* node, Node* context, Node* frame_state,
+ Node* effect, Node* control);
+ Node* BuildCreateConsString(Node* first, Node* second, Node* length,
+ Node* effect, Node* control);
+
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -105,7 +117,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
CompilationDependencies* dependencies_;
Flags flags_;
JSGraph* jsgraph_;
- Type* empty_string_type_;
Type* shifted_int32_ranges_[4];
Type* pointer_comparable_type_;
TypeCache const& type_cache_;
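
JSTypedLowering here, and LoadElimination and MachineOperatorReducer later in this patch, each gain a reducer_name() override. A minimal sketch of the pattern, assuming reducer_name() is a virtual on the reducer base class used to identify the reducer in diagnostics (the class below is hypothetical, not part of the patch):

    class MyReducer final : public AdvancedReducer {
     public:
      explicit MyReducer(Editor* editor) : AdvancedReducer(editor) {}

      // Identifies this reducer, e.g. in reduction tracing output.
      const char* reducer_name() const override { return "MyReducer"; }

      Reduction Reduce(Node* node) final { return NoChange(); }
    };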
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 1275f8f6ff..7224288b5a 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -121,7 +121,6 @@ int CallDescriptor::CalculateFixedFrameSize() const {
return TypedFrameConstants::kFixedSlotCount;
}
UNREACHABLE();
- return 0;
}
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
@@ -148,9 +147,8 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kAllocateInTargetSpace:
case Runtime::kConvertReceiver:
case Runtime::kCreateIterResultObject:
- case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
- case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kGeneratorGetContinuation:
+ case Runtime::kIncBlockCounter:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
@@ -183,8 +181,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineIsArray:
case Runtime::kInlineIsJSMap:
case Runtime::kInlineIsJSSet:
- case Runtime::kInlineIsJSMapIterator:
- case Runtime::kInlineIsJSSetIterator:
case Runtime::kInlineIsJSWeakMap:
case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
@@ -348,11 +344,11 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties, MachineType return_type,
- size_t return_count) {
+ size_t return_count, Linkage::ContextSpecification context_spec) {
const int register_parameter_count = descriptor.GetRegisterParameterCount();
const int js_parameter_count =
register_parameter_count + stack_parameter_count;
- const int context_count = 1;
+ const int context_count = context_spec == kPassContext ? 1 : 0;
const size_t parameter_count =
static_cast<size_t>(js_parameter_count + context_count);
@@ -384,7 +380,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
}
}
// Add context.
- locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
+ if (context_count) {
+ locations.AddParam(regloc(kContextRegister, MachineType::AnyTagged()));
+ }
// The target for stub calls is a code object.
MachineType target_type = MachineType::AnyTagged();
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index b515aca2da..82be5c7434 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -340,6 +340,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
// Call[BytecodeDispatch] address, arg 1, arg 2, [...]
class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
public:
+ enum ContextSpecification { kNoContext, kPassContext };
+
explicit Linkage(CallDescriptor* incoming) : incoming_(incoming) {}
static CallDescriptor* ComputeIncoming(Zone* zone, CompilationInfo* info);
@@ -365,7 +367,8 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
int stack_parameter_count, CallDescriptor::Flags flags,
Operator::Properties properties = Operator::kNoProperties,
MachineType return_type = MachineType::AnyTagged(),
- size_t return_count = 1);
+ size_t return_count = 1,
+ ContextSpecification context_spec = kPassContext);
static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
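
The new ContextSpecification argument lets callers request a stub call descriptor without the implicit context parameter. A hedged sketch of both call forms (isolate, zone and callable are placeholders from surrounding code; only the trailing Linkage::kNoContext argument is the new piece):

    // Default behaviour, unchanged: one extra AnyTagged parameter is added
    // for the context, passed in kContextRegister.
    CallDescriptor* with_context = Linkage::GetStubCallDescriptor(
        isolate, zone, callable.descriptor(), /* stack_parameter_count */ 0,
        CallDescriptor::kNoFlags);

    // Context-free variant: context_count becomes 0, so no context parameter
    // is appended to the location signature.
    CallDescriptor* no_context = Linkage::GetStubCallDescriptor(
        isolate, zone, callable.descriptor(), 0, CallDescriptor::kNoFlags,
        Operator::kNoProperties, MachineType::AnyTagged(), 1,
        Linkage::kNoContext);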
diff --git a/deps/v8/src/compiler/liveness-analyzer.cc b/deps/v8/src/compiler/liveness-analyzer.cc
deleted file mode 100644
index 0cf13332f4..0000000000
--- a/deps/v8/src/compiler/liveness-analyzer.cc
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/adapters.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/liveness-analyzer.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/state-values-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-LivenessAnalyzer::LivenessAnalyzer(size_t local_count, bool has_accumulator,
- Zone* zone)
- : zone_(zone),
- blocks_(zone),
- local_count_(local_count),
- has_accumulator_(has_accumulator),
- queue_(zone) {}
-
-void LivenessAnalyzer::Print(std::ostream& os) {
- for (auto block : blocks_) {
- block->Print(os);
- os << std::endl;
- }
-}
-
-
-LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
- LivenessAnalyzerBlock* result =
- new (zone()->New(sizeof(LivenessAnalyzerBlock))) LivenessAnalyzerBlock(
- blocks_.size(), local_count_, has_accumulator_, zone());
- blocks_.push_back(result);
- return result;
-}
-
-
-LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock(
- LivenessAnalyzerBlock* predecessor) {
- LivenessAnalyzerBlock* result = NewBlock();
- result->AddPredecessor(predecessor);
- return result;
-}
-
-
-void LivenessAnalyzer::Queue(LivenessAnalyzerBlock* block) {
- if (!block->IsQueued()) {
- block->SetQueued();
- queue_.push(block);
- }
-}
-
-
-void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
- if (local_count_ == 0 && !has_accumulator_) {
- // No variables => nothing to do.
- return;
- }
-
- // Put all blocks into the queue.
- DCHECK(queue_.empty());
- for (auto block : blocks_) {
- Queue(block);
- }
-
- // Compute the fix-point.
- BitVector working_area(
- static_cast<int>(local_count_) + (has_accumulator_ ? 1 : 0), zone_);
- while (!queue_.empty()) {
- LivenessAnalyzerBlock* block = queue_.front();
- queue_.pop();
- block->Process(&working_area, nullptr);
-
- for (auto i = block->pred_begin(); i != block->pred_end(); i++) {
- if ((*i)->UpdateLive(&working_area)) {
- Queue(*i);
- }
- }
- }
-
- // Update the frame states according to the liveness.
- for (auto block : blocks_) {
- block->Process(&working_area, replacer);
- }
-}
-
-LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
- bool has_accumulator, Zone* zone)
- : entries_(zone),
- predecessors_(zone),
- live_(static_cast<int>(local_count) + (has_accumulator ? 1 : 0), zone),
- queued_(false),
- has_accumulator_(has_accumulator),
- id_(id) {}
-
-void LivenessAnalyzerBlock::Process(BitVector* result,
- NonLiveFrameStateSlotReplacer* replacer) {
- queued_ = false;
-
- // Copy the bitvector to the target bit vector.
- result->CopyFrom(live_);
-
- for (auto entry : base::Reversed(entries_)) {
- switch (entry.kind()) {
- case Entry::kLookup:
- result->Add(entry.var());
- break;
- case Entry::kBind:
- result->Remove(entry.var());
- break;
- case Entry::kCheckpoint:
- if (replacer != nullptr) {
- replacer->ClearNonLiveFrameStateSlots(entry.node(), result);
- }
- break;
- }
- }
-}
-
-
-bool LivenessAnalyzerBlock::UpdateLive(BitVector* working_area) {
- return live_.UnionIsChanged(*working_area);
-}
-
-
-void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
- Node* frame_state, BitVector* liveness) {
- DCHECK_EQ(liveness->length(), permanently_live_.length());
-
- DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
- Node* locals_state = frame_state->InputAt(1);
- DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
- int count = liveness->length() - (has_accumulator_ ? 1 : 0);
- DCHECK_EQ(count, static_cast<int>(StateValuesAccess(locals_state).size()));
- for (int i = 0; i < count; i++) {
- if (!liveness->Contains(i) && !permanently_live_.Contains(i)) {
- Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
- frame_state->ReplaceInput(1, new_values);
- break;
- }
- }
-
- if (has_accumulator_) {
- DCHECK_EQ(frame_state->InputAt(2)->opcode(), IrOpcode::kStateValues);
- DCHECK_EQ(
- static_cast<int>(StateValuesAccess(frame_state->InputAt(2)).size()), 1);
- int index = liveness->length() - 1;
- if (!liveness->Contains(index) && !permanently_live_.Contains(index)) {
- Node* new_value =
- state_values_cache()->GetNodeForValues(&replacement_node_, 1);
- frame_state->ReplaceInput(2, new_value);
- }
- }
-}
-
-
-Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
- Node* values, BitVector* liveness) {
- DCHECK(inputs_buffer_.empty());
-
- int var = 0;
- for (Node* value_node : values->inputs()) {
- // Make sure this isn't a state value tree
- DCHECK(value_node->opcode() != IrOpcode::kStateValues);
-
- // Index of the next variable is its future index in the inputs buffer,
- // i.e., the buffer's size.
- bool live = liveness->Contains(var) || permanently_live_.Contains(var);
- inputs_buffer_.push_back(live ? value_node : replacement_node_);
-
- var++;
- }
-
- Node* result = state_values_cache()->GetNodeForValues(
- inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
- inputs_buffer_.size());
- inputs_buffer_.clear();
- return result;
-}
-
-
-void LivenessAnalyzerBlock::Print(std::ostream& os) {
- os << "Block " << id();
- bool first = true;
- for (LivenessAnalyzerBlock* pred : predecessors_) {
- if (!first) {
- os << ", ";
- } else {
- os << "; predecessors: ";
- first = false;
- }
- os << pred->id();
- }
- os << std::endl;
-
- for (auto entry : entries_) {
- os << " ";
- switch (entry.kind()) {
- case Entry::kLookup:
- if (has_accumulator_ && entry.var() == live_.length() - 1) {
- os << "- Lookup accumulator" << std::endl;
- } else {
- os << "- Lookup " << entry.var() << std::endl;
- }
- break;
- case Entry::kBind:
- if (has_accumulator_ && entry.var() == live_.length() - 1) {
- os << "- Bind accumulator" << std::endl;
- } else {
- os << "- Bind " << entry.var() << std::endl;
- }
- break;
- case Entry::kCheckpoint:
- os << "- Checkpoint " << entry.node()->id() << std::endl;
- break;
- }
- }
-
- if (live_.length() > 0) {
- os << " Live set: ";
- for (int i = 0; i < live_.length(); i++) {
- os << (live_.Contains(i) ? "L" : ".");
- }
- os << std::endl;
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
deleted file mode 100644
index 63fc52c125..0000000000
--- a/deps/v8/src/compiler/liveness-analyzer.h
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_LIVENESS_ANAYZER_H_
-#define V8_COMPILER_LIVENESS_ANAYZER_H_
-
-#include "src/bit-vector.h"
-#include "src/compiler/node.h"
-#include "src/globals.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class LivenessAnalyzerBlock;
-class Node;
-class StateValuesCache;
-
-class NonLiveFrameStateSlotReplacer {
- public:
- void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
- NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
- Node* replacement, size_t local_count,
- bool has_accumulator, Zone* local_zone)
- : replacement_node_(replacement),
- state_values_cache_(state_values_cache),
- local_zone_(local_zone),
- permanently_live_(
- static_cast<int>(local_count) + (has_accumulator ? 1 : 0),
- local_zone),
- inputs_buffer_(local_zone),
- has_accumulator_(has_accumulator) {}
-
- // TODO(leszeks): Not used by bytecode, remove once AST graph builder is gone.
- void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
-
- private:
- Node* ClearNonLiveStateValues(Node* frame_state, BitVector* liveness);
-
- StateValuesCache* state_values_cache() { return state_values_cache_; }
- Zone* local_zone() { return local_zone_; }
-
- // Node that replaces dead values.
- Node* replacement_node_;
- // Reference to state values cache so that we can create state values
- // nodes.
- StateValuesCache* state_values_cache_;
-
- Zone* local_zone_;
- BitVector permanently_live_;
- NodeVector inputs_buffer_;
-
- bool has_accumulator_;
-};
-
-class V8_EXPORT_PRIVATE LivenessAnalyzer {
- public:
- LivenessAnalyzer(size_t local_count, bool has_accumulator, Zone* zone);
-
- LivenessAnalyzerBlock* NewBlock();
- LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
-
- void Run(NonLiveFrameStateSlotReplacer* relaxer);
-
- Zone* zone() { return zone_; }
-
- void Print(std::ostream& os);
-
- size_t local_count() { return local_count_; }
-
- private:
- void Queue(LivenessAnalyzerBlock* block);
-
- Zone* zone_;
- ZoneDeque<LivenessAnalyzerBlock*> blocks_;
- size_t local_count_;
-
- // TODO(leszeks): Always true for bytecode, remove once AST graph builder is
- // gone.
- bool has_accumulator_;
-
- ZoneQueue<LivenessAnalyzerBlock*> queue_;
-};
-
-
-class LivenessAnalyzerBlock {
- public:
- friend class LivenessAnalyzer;
-
- void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
- void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
- void LookupAccumulator() {
- DCHECK(has_accumulator_);
- // The last entry is the accumulator entry.
- entries_.push_back(Entry(Entry::kLookup, live_.length() - 1));
- }
- void BindAccumulator() {
- DCHECK(has_accumulator_);
- // The last entry is the accumulator entry.
- entries_.push_back(Entry(Entry::kBind, live_.length() - 1));
- }
-
- void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
- void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
- LivenessAnalyzerBlock* GetPredecessor() {
- DCHECK(predecessors_.size() == 1);
- return predecessors_[0];
- }
-
- private:
- class Entry {
- public:
- enum Kind { kBind, kLookup, kCheckpoint };
-
- Kind kind() const { return kind_; }
- Node* node() const {
- DCHECK(kind() == kCheckpoint);
- return node_;
- }
- int var() const {
- DCHECK(kind() != kCheckpoint);
- return var_;
- }
-
- explicit Entry(Node* node) : kind_(kCheckpoint), var_(-1), node_(node) {}
- Entry(Kind kind, int var) : kind_(kind), var_(var), node_(nullptr) {
- DCHECK(kind != kCheckpoint);
- }
-
- private:
- Kind kind_;
- int var_;
- Node* node_;
- };
-
- LivenessAnalyzerBlock(size_t id, size_t local_count, bool has_accumulator,
- Zone* zone);
- void Process(BitVector* result, NonLiveFrameStateSlotReplacer* relaxer);
- bool UpdateLive(BitVector* working_area);
-
- void SetQueued() { queued_ = true; }
- bool IsQueued() { return queued_; }
-
- ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_begin() {
- return predecessors_.begin();
- }
- ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_end() {
- return predecessors_.end();
- }
-
- size_t id() { return id_; }
- void Print(std::ostream& os);
-
- ZoneDeque<Entry> entries_;
- ZoneDeque<LivenessAnalyzerBlock*> predecessors_;
-
- BitVector live_;
- bool queued_;
- bool has_accumulator_;
-
- size_t id_;
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_AST_GRAPH_BUILDER_H_
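
For context, the deleted LivenessAnalyzer::Run above is a standard backward liveness fix-point: every block starts on a worklist, live-in sets are propagated to predecessors until nothing changes, and a final pass rewrites frame states. A generic sketch of that shape, collapsing the ordered lookup/bind entries into per-block gen/kill sets (plain C++, not the V8 classes):

    #include <queue>
    #include <vector>

    struct Block {
      std::vector<Block*> preds;
      // lookups = locals read in the block, binds = locals written,
      // live = locals live at the end of the block.
      std::vector<bool> lookups, binds, live;
      bool queued = false;
    };

    // live-in(b) = (live-out(b) - binds(b)) + lookups(b).
    static std::vector<bool> LiveIn(const Block* b) {
      std::vector<bool> in = b->live;
      for (size_t i = 0; i < in.size(); ++i) {
        if (b->binds[i]) in[i] = false;
        if (b->lookups[i]) in[i] = true;
      }
      return in;
    }

    void RunLiveness(std::vector<Block*>& blocks) {
      std::queue<Block*> worklist;
      for (Block* b : blocks) { b->queued = true; worklist.push(b); }
      while (!worklist.empty()) {
        Block* b = worklist.front(); worklist.pop();
        b->queued = false;
        std::vector<bool> in = LiveIn(b);
        // Propagate live-in to each predecessor's live-out; requeue on change.
        for (Block* p : b->preds) {
          bool changed = false;
          for (size_t i = 0; i < in.size(); ++i) {
            if (in[i] && !p->live[i]) { p->live[i] = true; changed = true; }
          }
          if (changed && !p->queued) { p->queued = true; worklist.push(p); }
        }
      }
    }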
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index b4a5b717e6..775da82587 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -114,8 +114,12 @@ Reduction LoadElimination::Reduce(Node* node) {
return ReduceLoadElement(node);
case IrOpcode::kStoreElement:
return ReduceStoreElement(node);
+ case IrOpcode::kTransitionAndStoreElement:
+ return ReduceTransitionAndStoreElement(node);
case IrOpcode::kStoreTypedElement:
return ReduceStoreTypedElement(node);
+ case IrOpcode::kLookupHashStorageIndex:
+ return ReduceLookupHashStorageIndex(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
@@ -305,6 +309,37 @@ void LoadElimination::AbstractElements::Print() const {
}
}
+Node* LoadElimination::AbstractHashIndexes::Lookup(Node* table,
+ Node* key) const {
+ if (entry_.table == nullptr) return nullptr;
+ if (MustAlias(table, entry_.table) && MustAlias(key, entry_.key)) {
+ return entry_.index;
+ }
+ return nullptr;
+}
+
+bool LoadElimination::AbstractHashIndexes::Equals(
+ AbstractHashIndexes const* that) const {
+ return entry_.table == that->entry_.table && entry_.key == that->entry_.key &&
+ entry_.index == that->entry_.index;
+}
+
+LoadElimination::AbstractHashIndexes const*
+LoadElimination::AbstractHashIndexes::Merge(AbstractHashIndexes const* that,
+ Zone* zone) const {
+ if (this->Equals(that)) return this;
+ return nullptr;
+}
+
+void LoadElimination::AbstractHashIndexes::Print() const {
+ if (entry_.table) {
+ PrintF(" #%d:%s @ #%d:%s -> #%d:%s\n", entry_.table->id(),
+ entry_.table->op()->mnemonic(), entry_.key->id(),
+ entry_.key->op()->mnemonic(), entry_.index->id(),
+ entry_.index->op()->mnemonic());
+ }
+}
+
Node* LoadElimination::AbstractField::Lookup(Node* object) const {
for (auto pair : info_for_node_) {
if (MustAlias(object, pair.first)) return pair.second;
@@ -434,6 +469,13 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
if (this->maps_) {
this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
}
+
+ // Merge the information about hash maps.
+ if (this->hash_indexes_) {
+ this->hash_indexes_ = that->hash_indexes_ ? that->hash_indexes_->Merge(
+ this->hash_indexes_, zone)
+ : nullptr;
+ }
}
Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -504,6 +546,26 @@ LoadElimination::AbstractState::AddElement(Node* object, Node* index,
return that;
}
+Node* LoadElimination::AbstractState::LookupHashIndex(Node* table,
+ Node* key) const {
+ if (this->hash_indexes_) {
+ return this->hash_indexes_->Lookup(table, key);
+ }
+ return nullptr;
+}
+
+LoadElimination::AbstractState const*
+LoadElimination::AbstractState::AddHashIndex(Node* table, Node* key,
+ Node* index, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->hash_indexes_) {
+ that->hash_indexes_ = that->hash_indexes_->Extend(table, key, index, zone);
+ } else {
+ that->hash_indexes_ = new (zone) AbstractHashIndexes(table, key, index);
+ }
+ return that;
+}
+
LoadElimination::AbstractState const*
LoadElimination::AbstractState::KillElement(Node* object, Node* index,
Zone* zone) const {
@@ -724,6 +786,30 @@ Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
return UpdateState(node, state);
}
+Reduction LoadElimination::ReduceTransitionAndStoreElement(Node* node) {
+ Node* const object = NodeProperties::GetValueInput(node, 0);
+ Handle<Map> double_map(DoubleMapParameterOf(node->op()));
+ Handle<Map> fast_map(FastMapParameterOf(node->op()));
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ // We need to add the double and fast maps to the set of possible maps for
+ // this object, because we don't know which of those we'll transition to.
+ // Additionally, we should kill all alias information.
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ object_maps.insert(double_map, zone());
+ object_maps.insert(fast_map, zone());
+ state = state->KillMaps(object, zone());
+ state = state->AddMaps(object, object_maps, zone());
+ }
+ // Kill the elements as well.
+ state =
+ state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ return UpdateState(node, state);
+}
+
Reduction LoadElimination::ReduceLoadField(Node* node) {
FieldAccess const& access = FieldAccessOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
@@ -785,7 +871,7 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
if (new_value_type->IsHeapConstant()) {
// Record the new {object} map information.
ZoneHandleSet<Map> object_maps(
- Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+ bit_cast<Handle<Map>>(new_value_type->AsHeapConstant()->Value()));
state = state->AddMaps(object, object_maps, zone());
}
} else {
@@ -819,9 +905,6 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
ElementAccess const& access = ElementAccessOf(node->op());
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
@@ -879,9 +962,6 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
// Only record the new value if the store doesn't have an implicit truncation.
switch (access.machine_type.representation()) {
case MachineRepresentation::kNone:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
UNREACHABLE();
break;
@@ -911,6 +991,25 @@ Reduction LoadElimination::ReduceStoreTypedElement(Node* node) {
return UpdateState(node, state);
}
+Reduction LoadElimination::ReduceLookupHashStorageIndex(Node* node) {
+ Node* table = node->InputAt(0);
+ Node* key = node->InputAt(1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ if (Node* replacement = state->LookupHashIndex(table, key)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ state = state->AddHashIndex(table, key, node, zone());
+ return UpdateState(node, state);
+}
+
Reduction LoadElimination::ReduceEffectPhi(Node* node) {
Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
Node* const control = NodeProperties::GetControlInput(node);
@@ -1037,6 +1136,15 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
}
break;
}
+ case IrOpcode::kTransitionAndStoreElement: {
+ Node* const object = NodeProperties::GetValueInput(current, 0);
+ // Invalidate what we know about the {object}s map.
+ state = state->KillMaps(object, zone());
+ // Kill the elements as well.
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ break;
+ }
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
@@ -1092,9 +1200,6 @@ int LoadElimination::FieldIndexOf(FieldAccess const& access) {
case MachineRepresentation::kNone:
case MachineRepresentation::kBit:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
UNREACHABLE();
break;
case MachineRepresentation::kWord32:
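
ReduceLookupHashStorageIndex above memoizes a single (table, key) -> index entry on the abstract state: a later lookup whose table and key must-alias the cached ones is replaced by the earlier node, and at effect merges the entry survives only when both predecessors carry the same one. A standalone sketch of that cache, with pointer identity standing in for MustAlias (the struct is illustrative, not the V8 class):

    struct Node;  // opaque graph node

    struct HashIndexCache {
      Node* table = nullptr;
      Node* key = nullptr;
      Node* index = nullptr;

      Node* Lookup(Node* t, Node* k) const {
        return (table != nullptr && table == t && key == k) ? index : nullptr;
      }
      // Only one entry is held, so extending simply replaces it.
      void Extend(Node* t, Node* k, Node* i) { table = t; key = k; index = i; }
      // At an effect merge the entry survives only if both sides agree.
      static HashIndexCache Merge(const HashIndexCache& a,
                                  const HashIndexCache& b) {
        return (a.table == b.table && a.key == b.key && a.index == b.index)
                   ? a
                   : HashIndexCache();
      }
    };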
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 5d09aa5124..dc65a12e11 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -32,6 +32,8 @@ class V8_EXPORT_PRIVATE LoadElimination final
: AdvancedReducer(editor), node_states_(zone), jsgraph_(jsgraph) {}
~LoadElimination() final {}
+ const char* reducer_name() const override { return "LoadElimination"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -123,6 +125,46 @@ class V8_EXPORT_PRIVATE LoadElimination final
size_t next_index_ = 0;
};
+ // Abstract state to approximate the current state of a hash map along the
+ // effect paths through the graph.
+ class AbstractHashIndexes final : public ZoneObject {
+ public:
+ AbstractHashIndexes() {}
+
+ AbstractHashIndexes(Node* table, Node* key, Node* index)
+ : AbstractHashIndexes() {
+ entry_ = Entry(table, key, index);
+ }
+
+ AbstractHashIndexes const* Extend(Node* table, Node* key, Node* index,
+ Zone* zone) const {
+ // Currently, we only hold one entry, so we just create a new
+ // state with the one entry.
+ AbstractHashIndexes* that =
+ new (zone) AbstractHashIndexes(table, key, index);
+ return that;
+ }
+ Node* Lookup(Node* table, Node* key) const;
+ bool Equals(AbstractHashIndexes const* that) const;
+ AbstractHashIndexes const* Merge(AbstractHashIndexes const* that,
+ Zone* zone) const;
+
+ void Print() const;
+
+ private:
+ struct Entry {
+ Entry() {}
+ Entry(Node* table, Node* key, Node* index)
+ : table(table), key(key), index(index) {}
+
+ Node* table = nullptr;
+ Node* key = nullptr;
+ Node* index = nullptr;
+ };
+
+ Entry entry_;
+ };
+
// Abstract state to approximate the current state of a certain field along
// the effect paths through the graph.
class AbstractField final : public ZoneObject {
@@ -240,6 +282,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Zone* zone) const;
Node* LookupElement(Node* object, Node* index,
MachineRepresentation representation) const;
+ AbstractState const* AddHashIndex(Node* table, Node* key, Node* index,
+ Zone* zone) const;
+ Node* LookupHashIndex(Node* table, Node* key) const;
AbstractState const* AddCheck(Node* node, Zone* zone) const;
Node* LookupCheck(Node* node) const;
@@ -251,6 +296,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
AbstractMaps const* maps_ = nullptr;
+ AbstractHashIndexes const* hash_indexes_ = nullptr;
};
class AbstractStateForEffectNodes final : public ZoneObject {
@@ -274,7 +320,9 @@ class V8_EXPORT_PRIVATE LoadElimination final
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
+ Reduction ReduceTransitionAndStoreElement(Node* node);
Reduction ReduceStoreTypedElement(Node* node);
+ Reduction ReduceLookupHashStorageIndex(Node* node);
Reduction ReduceEffectPhi(Node* node);
Reduction ReduceStart(Node* node);
Reduction ReduceOtherNode(Node* node);
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index fb3e1e753b..084d4ce06a 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -123,7 +123,6 @@ class LoopTree : public ZoneObject {
if (node->opcode() == IrOpcode::kLoop) return node;
}
UNREACHABLE();
- return nullptr;
}
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 6ac7a163e1..32123b3440 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -794,9 +794,6 @@ class MachineRepresentationChecker {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kBit:
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index a50f0dcb1b..383f2799fe 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -800,8 +800,8 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
int32_t const divisor = m.right().Value();
Node* const dividend = m.left().node();
Node* quotient = dividend;
- if (base::bits::IsPowerOfTwo32(Abs(divisor))) {
- uint32_t const shift = WhichPowerOf2Abs(divisor);
+ if (base::bits::IsPowerOfTwo(Abs(divisor))) {
+ uint32_t const shift = WhichPowerOf2(Abs(divisor));
DCHECK_NE(0u, shift);
if (shift > 1) {
quotient = Word32Sar(quotient, 31);
@@ -840,7 +840,7 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
- if (base::bits::IsPowerOfTwo32(divisor)) { // x / 2^n => x >> n
+ if (base::bits::IsPowerOfTwo(divisor)) { // x / 2^n => x >> n
node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32Shr());
@@ -866,8 +866,8 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
}
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
- int32_t const divisor = Abs(m.right().Value());
- if (base::bits::IsPowerOfTwo32(divisor)) {
+ uint32_t const divisor = Abs(m.right().Value());
+ if (base::bits::IsPowerOfTwo(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
Diamond d(graph(), common(),
@@ -903,7 +903,7 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
if (m.right().HasValue()) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
- if (base::bits::IsPowerOfTwo32(divisor)) { // x % 2^n => x & 2^n-1
+ if (base::bits::IsPowerOfTwo(divisor)) { // x % 2^n => x & 2^n-1
node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
node->TrimInputCount(2);
NodeProperties::ChangeOp(node, machine()->Word32And());
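
The reductions above rely on the usual power-of-two identities: for unsigned x and divisor d = 2^n, x / d == x >> n and x % d == x & (d - 1). A self-contained check of those identities in plain C++ (the helpers mimic, but are not, the base::bits functions):

    #include <cassert>
    #include <cstdint>

    // True iff v is a nonzero power of two, the predicate the reducer checks.
    static bool IsPowerOfTwo(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

    // Index n of the single set bit, i.e. v == 1u << n.
    static uint32_t WhichPowerOfTwo(uint32_t v) {
      uint32_t n = 0;
      while ((v >>= 1) != 0) ++n;
      return n;
    }

    int main() {
      uint32_t x = 12345u, d = 16u;
      assert(IsPowerOfTwo(d));
      uint32_t n = WhichPowerOfTwo(d);
      assert(x / d == x >> n);         // Uint32Div -> Word32Shr
      assert(x % d == (x & (d - 1)));  // Uint32Mod -> Word32And
      return 0;
    }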
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 593f7f2d22..278db5324d 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -28,6 +28,8 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
bool allow_signalling_nan = true);
~MachineOperatorReducer();
+ const char* reducer_name() const override { return "MachineOperatorReducer"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 96f7dc1a91..b137543e00 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -270,15 +270,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I32x4MaxS, Operator::kCommutative, 2, 0, 1) \
V(I32x4Eq, Operator::kCommutative, 2, 0, 1) \
V(I32x4Ne, Operator::kCommutative, 2, 0, 1) \
- V(I32x4LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I32x4LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GeS, Operator::kNoProperties, 2, 0, 1) \
V(I32x4UConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4UConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
V(I32x4UConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
V(I32x4MinU, Operator::kCommutative, 2, 0, 1) \
V(I32x4MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I32x4LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I32x4LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I32x4GeU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Splat, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8SConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
@@ -294,8 +294,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8MaxS, Operator::kCommutative, 2, 0, 1) \
V(I16x8Eq, Operator::kCommutative, 2, 0, 1) \
V(I16x8Ne, Operator::kCommutative, 2, 0, 1) \
- V(I16x8LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GeS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8UConvertI8x16Low, Operator::kNoProperties, 1, 0, 1) \
V(I16x8UConvertI8x16High, Operator::kNoProperties, 1, 0, 1) \
V(I16x8UConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
@@ -303,8 +303,8 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
V(I16x8MinU, Operator::kCommutative, 2, 0, 1) \
V(I16x8MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I16x8LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I16x8LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I16x8GeU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16Splat, Operator::kNoProperties, 1, 0, 1) \
V(I8x16Neg, Operator::kNoProperties, 1, 0, 1) \
V(I8x16SConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
@@ -317,15 +317,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I8x16MaxS, Operator::kCommutative, 2, 0, 1) \
V(I8x16Eq, Operator::kCommutative, 2, 0, 1) \
V(I8x16Ne, Operator::kCommutative, 2, 0, 1) \
- V(I8x16LtS, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16LeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GeS, Operator::kNoProperties, 2, 0, 1) \
V(I8x16UConvertI16x8, Operator::kNoProperties, 2, 0, 1) \
V(I8x16AddSaturateU, Operator::kCommutative, 2, 0, 1) \
V(I8x16SubSaturateU, Operator::kNoProperties, 2, 0, 1) \
V(I8x16MinU, Operator::kCommutative, 2, 0, 1) \
V(I8x16MaxU, Operator::kCommutative, 2, 0, 1) \
- V(I8x16LtU, Operator::kNoProperties, 2, 0, 1) \
- V(I8x16LeU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I8x16GeU, Operator::kNoProperties, 2, 0, 1) \
V(S128Load, Operator::kNoProperties, 2, 0, 1) \
V(S128Store, Operator::kNoProperties, 3, 0, 1) \
V(S128Zero, Operator::kNoProperties, 0, 0, 1) \
@@ -333,28 +333,11 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(S128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
- V(S32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(S16x8Select, Operator::kNoProperties, 3, 0, 1) \
- V(S8x16Select, Operator::kNoProperties, 3, 0, 1) \
- V(S1x4Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x4Not, Operator::kNoProperties, 1, 0, 1) \
+ V(S128Select, Operator::kNoProperties, 3, 0, 1) \
V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x8Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x8Not, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S1x16Zero, Operator::kNoProperties, 0, 0, 1) \
- V(S1x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(S1x16Not, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)
@@ -710,7 +693,6 @@ const Operator* MachineOperatorBuilder::UnalignedLoad(
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::UnalignedStore(
@@ -722,14 +704,10 @@ const Operator* MachineOperatorBuilder::UnalignedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
#define PURE(Name, properties, value_input_count, control_input_count, \
@@ -759,7 +737,6 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
@@ -770,7 +747,6 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
@@ -810,14 +786,10 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::ProtectedStore(
@@ -830,14 +802,10 @@ const Operator* MachineOperatorBuilder::ProtectedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
@@ -865,7 +833,6 @@ const Operator* MachineOperatorBuilder::CheckedLoad(
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
@@ -878,14 +845,10 @@ const Operator* MachineOperatorBuilder::CheckedStore(
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
case MachineRepresentation::kBit:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
@@ -896,7 +859,6 @@ const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
ATOMIC_TYPE_LIST(LOAD)
#undef LOAD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
@@ -907,7 +869,6 @@ const Operator* MachineOperatorBuilder::AtomicStore(MachineRepresentation rep) {
ATOMIC_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
@@ -918,7 +879,6 @@ const Operator* MachineOperatorBuilder::AtomicExchange(MachineType rep) {
ATOMIC_TYPE_LIST(EXCHANGE)
#undef EXCHANGE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
@@ -929,7 +889,6 @@ const Operator* MachineOperatorBuilder::AtomicCompareExchange(MachineType rep) {
ATOMIC_TYPE_LIST(COMPARE_EXCHANGE)
#undef COMPARE_EXCHANGE
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
@@ -940,7 +899,6 @@ const Operator* MachineOperatorBuilder::AtomicAdd(MachineType rep) {
ATOMIC_TYPE_LIST(ADD)
#undef ADD
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
@@ -951,7 +909,6 @@ const Operator* MachineOperatorBuilder::AtomicSub(MachineType rep) {
ATOMIC_TYPE_LIST(SUB)
#undef SUB
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
@@ -962,7 +919,6 @@ const Operator* MachineOperatorBuilder::AtomicAnd(MachineType rep) {
ATOMIC_TYPE_LIST(AND)
#undef AND
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
@@ -973,7 +929,6 @@ const Operator* MachineOperatorBuilder::AtomicOr(MachineType rep) {
ATOMIC_TYPE_LIST(OR)
#undef OR
UNREACHABLE();
- return nullptr;
}
const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
@@ -984,7 +939,6 @@ const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
ATOMIC_TYPE_LIST(XOR)
#undef XOR
UNREACHABLE();
- return nullptr;
}
#define SIMD_LANE_OPS(Type, lane_count) \
@@ -1027,23 +981,8 @@ SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
-const Operator* MachineOperatorBuilder::S32x4Shuffle(uint8_t shuffle[16]) {
- uint8_t* array = zone_->NewArray<uint8_t>(4);
- memcpy(array, shuffle, 4);
- return new (zone_)
- Operator1<uint8_t*>(IrOpcode::kS32x4Shuffle, Operator::kPure, "Shuffle",
- 2, 0, 0, 1, 0, 0, array);
-}
-
-const Operator* MachineOperatorBuilder::S16x8Shuffle(uint8_t shuffle[16]) {
- uint8_t* array = zone_->NewArray<uint8_t>(8);
- memcpy(array, shuffle, 8);
- return new (zone_)
- Operator1<uint8_t*>(IrOpcode::kS16x8Shuffle, Operator::kPure, "Shuffle",
- 2, 0, 0, 1, 0, 0, array);
-}
-
-const Operator* MachineOperatorBuilder::S8x16Shuffle(uint8_t shuffle[16]) {
+const Operator* MachineOperatorBuilder::S8x16Shuffle(
+ const uint8_t shuffle[16]) {
uint8_t* array = zone_->NewArray<uint8_t>(16);
memcpy(array, shuffle, 16);
return new (zone_)
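
With S32x4Shuffle and S16x8Shuffle removed, shuffles of wider lanes can presumably be expressed through the remaining byte-level S8x16Shuffle by expanding each lane index into its constituent byte indices. The helper below illustrates that expansion for 32-bit lanes under a little-endian lane layout; it is an assumption about the callers, not a V8 function:

    #include <cstdint>

    // Expand a 4-lane (32-bit) shuffle pattern into the 16 byte indices
    // expected by S8x16Shuffle.
    void ExpandS32x4ToS8x16(const uint8_t lanes[4], uint8_t bytes[16]) {
      for (int i = 0; i < 4; ++i) {
        for (int b = 0; b < 4; ++b) {
          bytes[i * 4 + b] = static_cast<uint8_t>(lanes[i] * 4 + b);
        }
      }
    }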
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 82d40a09e3..457c598de1 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -494,8 +494,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4MaxS();
const Operator* I32x4Eq();
const Operator* I32x4Ne();
- const Operator* I32x4LtS();
- const Operator* I32x4LeS();
+ const Operator* I32x4GtS();
+ const Operator* I32x4GeS();
const Operator* I32x4UConvertF32x4();
const Operator* I32x4UConvertI16x8Low();
@@ -503,8 +503,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4ShrU(int32_t);
const Operator* I32x4MinU();
const Operator* I32x4MaxU();
- const Operator* I32x4LtU();
- const Operator* I32x4LeU();
+ const Operator* I32x4GtU();
+ const Operator* I32x4GeU();
const Operator* I16x8Splat();
const Operator* I16x8ExtractLane(int32_t);
@@ -525,8 +525,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8MaxS();
const Operator* I16x8Eq();
const Operator* I16x8Ne();
- const Operator* I16x8LtS();
- const Operator* I16x8LeS();
+ const Operator* I16x8GtS();
+ const Operator* I16x8GeS();
const Operator* I16x8UConvertI8x16Low();
const Operator* I16x8UConvertI8x16High();
@@ -536,8 +536,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8SubSaturateU();
const Operator* I16x8MinU();
const Operator* I16x8MaxU();
- const Operator* I16x8LtU();
- const Operator* I16x8LeU();
+ const Operator* I16x8GtU();
+ const Operator* I16x8GeU();
const Operator* I8x16Splat();
const Operator* I8x16ExtractLane(int32_t);
@@ -555,8 +555,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16MaxS();
const Operator* I8x16Eq();
const Operator* I8x16Ne();
- const Operator* I8x16LtS();
- const Operator* I8x16LeS();
+ const Operator* I8x16GtS();
+ const Operator* I8x16GeS();
const Operator* I8x16ShrU(int32_t);
const Operator* I8x16UConvertI16x8();
@@ -564,8 +564,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I8x16SubSaturateU();
const Operator* I8x16MinU();
const Operator* I8x16MaxU();
- const Operator* I8x16LtU();
- const Operator* I8x16LeU();
+ const Operator* I8x16GtU();
+ const Operator* I8x16GeU();
const Operator* S128Load();
const Operator* S128Store();
@@ -575,35 +575,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Or();
const Operator* S128Xor();
const Operator* S128Not();
+ const Operator* S128Select();
+
+ const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
- const Operator* S32x4Shuffle(uint8_t shuffle[16]);
- const Operator* S32x4Select();
- const Operator* S16x8Shuffle(uint8_t shuffle[16]);
- const Operator* S16x8Select();
- const Operator* S8x16Shuffle(uint8_t shuffle[16]);
- const Operator* S8x16Select();
-
- const Operator* S1x4Zero();
- const Operator* S1x4And();
- const Operator* S1x4Or();
- const Operator* S1x4Xor();
- const Operator* S1x4Not();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
-
- const Operator* S1x8Zero();
- const Operator* S1x8And();
- const Operator* S1x8Or();
- const Operator* S1x8Xor();
- const Operator* S1x8Not();
const Operator* S1x8AnyTrue();
const Operator* S1x8AllTrue();
-
- const Operator* S1x16Zero();
- const Operator* S1x16And();
- const Operator* S1x16Or();
- const Operator* S1x16Xor();
- const Operator* S1x16Not();
const Operator* S1x16AnyTrue();
const Operator* S1x16AllTrue();
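
The SIMD comparison operators above keep only the Gt/Ge directions; a Lt/Le comparison can still be formed by swapping the operands, since lane-wise a < b equals b > a. That motivation is inferred from the renaming, and the scalar model below is only illustrative:

    #include <cstdint>

    // Lane-wise signed "greater than" on 4 x int32, the primitive kept above
    // (all-ones lane for true, zero for false).
    void I32x4GtS(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
      for (int i = 0; i < 4; ++i) out[i] = a[i] > b[i] ? -1 : 0;
    }

    // "a < b" expressed with the kept primitive: swap the inputs.
    void I32x4LtS(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
      I32x4GtS(b, a, out);
    }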
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 5055735ba6..e87f210264 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -14,8 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
@@ -80,11 +79,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
@@ -96,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(zero_reg);
}
Operand InputOperand(size_t index) {
@@ -120,7 +116,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
@@ -233,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -251,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Addu(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
@@ -268,15 +264,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
-#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
class ool_name final : public OutOfLineCode { \
public: \
ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
: OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
\
- void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
\
private: \
T const dst_; \
@@ -320,7 +317,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -334,7 +330,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -368,7 +363,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
break;
}
UNREACHABLE();
- return kNoFPUCondition;
}
} // namespace
@@ -518,7 +512,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -530,7 +524,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
__ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
@@ -579,7 +573,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -587,10 +581,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Subu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Subu(sp, sp, stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Addu(sp, sp, -stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -599,13 +593,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -619,8 +613,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
@@ -636,8 +629,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
@@ -752,7 +744,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -855,8 +848,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log2);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -938,68 +931,75 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
case kMipsCtz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Subu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ clz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x1F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x20);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (IsMipsArchVariant(kMips32r6)) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ rotr(dst, src, 16);
+ __ wsbh(dst, dst);
+ __ bitswap(dst, dst);
+ __ Clz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Addu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ Clz(dst, dst);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 32);
+ __ Subu(dst, kScratchReg, dst);
+ }
} break;
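
The pre-r6 path above derives count-trailing-zeros from Clz: (x - 1) ^ x sets the trailing zeros plus the lowest set bit, the And with x - 1 clears that bit again, and 32 minus the leading-zero count of the result is the answer. A plain C++ model of the same sequence (including the x == 0 case, which yields 32):

    #include <cstdint>

    // Portable equivalent of the pre-r6 instruction sequence above.
    uint32_t Ctz32(uint32_t x) {
      // Exactly ctz(x) trailing ones, everything above them cleared.
      uint32_t m = ((x - 1) ^ x) & (x - 1);
      uint32_t lz = 0;  // count leading zeros of m
      for (uint32_t bit = 0x80000000u; bit != 0 && !(m & bit); bit >>= 1) ++lz;
      return 32 - lz;
    }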
case kMipsPopcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint32_t m1 = 0x55555555;
- uint32_t m2 = 0x33333333;
- uint32_t m4 = 0x0f0f0f0f;
- uint32_t m8 = 0x00ff00ff;
- uint32_t m16 = 0x0000ffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ srl(reg1, i.InputRegister(0), 1);
- __ And(reg2, i.InputRegister(0), at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ srl(reg2, reg1, 2);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ srl(reg2, reg1, 4);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ srl(reg2, reg1, 8);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m16);
- __ srl(reg2, reg1, 16);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ addu(i.OutputRegister(), reg1, reg2);
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For 64-bit quantities, this algorithm gets executed twice, (once
+ // for in_lo, and again for in_hi), but saves a few instructions
+ // because the mask values only have to be loaded once. Using this
+ // algorithm the count for a 64-bit operand can be performed in 29
+ // instructions compared to a loop-based algorithm which requires 47
+ // instructions.
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ __ srl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Subu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ srl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Addu(kScratchReg, dst, kScratchReg);
+ __ srl(dst, kScratchReg, 4);
+ __ Addu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Mul(dst, dst, kScratchReg);
+ __ srl(dst, dst, shift);
} break;
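A scalar reference of the parallel bit count emitted above, using the same masks B0/B1/B2 and the final multiply-and-shift; this mirrors the cited bithacks formula and is only a sketch, not part of the patch:

#include <cstdint>

uint32_t popcount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // B0: 2-bit sums
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // B1: 4-bit sums
  v = (v + (v >> 4)) & 0x0f0f0f0fu;                  // B2: 8-bit sums
  return (v * 0x01010101u) >> 24;                    // add the byte sums; shift == 24
}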
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1120,7 +1120,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1206,7 +1206,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMipsModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
@@ -1653,24 +1653,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNREACHABLE();
break;
case kMipsS128Zero: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register());
break;
}
case kMipsI32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1680,31 +1680,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMipsF32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ FmoveLow(i.OutputSingleRegister(), kScratchReg);
break;
}
case kMipsF32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1715,213 +1715,211 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsF32x4SConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4UConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMipsI32x4Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMipsI32x4MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsS32x4Select:
- case kMipsS16x8Select:
- case kMipsS8x16Select: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ case kMipsS128Select: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Abs: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMipsF32x4RecipApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4RecipSqrtApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsF32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Max: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Min: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Lt: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsF32x4Le: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI32x4SConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4UConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMipsI32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI32x4LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI32x4GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
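The GtS/GeS/GtU/GeU cases above reuse MSA's less-than / less-or-equal compares with the operands swapped, since a > b is the same as b < a. Per-lane meaning (sketch, illustrative only):

// clt_s_w(dst, src1, src0): dst[i] = (src1[i] <  src0[i]) ? -1 : 0   // src0 > src1
// cle_s_w(dst, src1, src0): dst[i] = (src1[i] <= src0[i]) ? -1 : 0   // src0 >= src1
void i32x4_gt_s(int32_t dst[4], const int32_t a[4], const int32_t b[4]) {
  for (int i = 0; i < 4; ++i) dst[i] = (b[i] < a[i]) ? -1 : 0;
}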
case kMipsI16x8Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI16x8ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI16x8ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -1931,146 +1929,146 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI16x8Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI16x8Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMipsI16x8Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8AddSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8SubSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
- case kMipsI16x8LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI16x8LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMipsI16x8AddSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8SubSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMipsI16x8MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMipsI16x8LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMipsI16x8LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMipsI16x8GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMipsI8x16Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMipsI8x16ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMipsI8x16ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2080,24 +2078,637 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsI8x16Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMipsI8x16Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kMipsI8x16ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
+ case kMipsI8x16Add: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16AddSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Sub: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16SubSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Mul: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MaxS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MinS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Eq: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMipsI8x16GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16ShrU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMipsI8x16AddSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16SubSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MaxU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16MinU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsS128And: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Or: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Xor: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsS128Not: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsS1x4AnyTrue:
+ case kMipsS1x8AnyTrue:
+ case kMipsS1x16AnyTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_false;
+
+ __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 0); // branch delay slot
+ __ li(dst, -1);
+ __ bind(&all_false);
+ break;
+ }
+ case kMipsS1x4AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMipsS1x8AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMipsS1x16AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
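The AnyTrue/AllTrue cases rely on BranchMSA, with the branch delay slot preloading one of the two possible results. Their scalar meaning, assuming byte-sized lanes purely for illustration (sketch, not part of the patch):

int32_t any_true(const uint8_t* lanes, int n) {   // BranchMSA(..., all_zero, ...)
  for (int i = 0; i < n; ++i) {
    if (lanes[i] != 0) return -1;
  }
  return 0;
}

int32_t all_true(const uint8_t* lanes, int n) {   // BranchMSA(..., all_not_zero, ...)
  for (int i = 0; i < n; ++i) {
    if (lanes[i] == 0) return 0;
  }
  return -1;
}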
+ case kMipsMsaLd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ }
+ case kMipsMsaSt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
+ break;
+ }
+ case kMipsS32x4InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [5, 1, 4, 0]
+ __ ilvr_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 6, 2]
+ __ ilvl_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 4, 2, 0]
+ __ pckev_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 5, 3, 1]
+ __ pckod_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 2, 4, 0]
+ __ ilvev_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 5, 1]
+ __ ilvod_w(dst, src1, src0);
+ break;
+ }
+ case kMipsS32x4Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int32_t shuffle = i.InputInt32(2);
+
+ if (src0.is(src1)) {
+        // Unary S32x4 shuffles are handled with the shf.w instruction
+ uint32_t i8 = 0;
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0xff;
+ DCHECK(lane < 4);
+ i8 |= lane << (2 * i);
+ shuffle >>= 8;
+ }
+ __ shf_w(dst, src0, i8);
+ } else {
+        // For binary shuffles, use the vshf.w instruction
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(dst, kSimd128RegZero, dst);
+ __ ilvr_h(dst, kSimd128RegZero, dst);
+ __ vshf_w(dst, src1, src0);
+ }
+ break;
+ }
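For reference, the operation handled here picks every output lane from the eight candidate lanes of the two inputs; the unary case repacks the per-lane indices into an shf.w immediate, while the binary case feeds them to vshf.w. Scalar sketch of the shuffle semantics (illustrative only):

void s32x4_shuffle(uint32_t dst[4], const uint32_t src0[4],
                   const uint32_t src1[4], const uint8_t sel[4]) {
  for (int i = 0; i < 4; ++i) {
    dst[i] = (sel[i] < 4) ? src0[sel[i]] : src1[sel[i] - 4];
  }
}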
+ case kMipsS16x8InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [11, 3, 10, 2, 9, 1, 8, 0]
+ __ ilvr_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, 14, 6, 13, 5, 12, 4]
+ __ ilvl_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 12, 10, 8, 6, 4, 2, 0]
+ __ pckev_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 13, 11, 9, 7, 5, 3, 1]
+ __ pckod_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 6, 12, 4, 10, 2, 8, 0]
+ __ ilvev_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x8InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, ... 11, 3, 9, 1]
+ __ ilvod_h(dst, src1, src0);
+ break;
+ }
+ case kMipsS16x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMipsS16x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
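The shf.df immediates used by the Reverse cases encode a permutation with two bits per destination element: element i takes source element (imm >> 2*i) & 3. Sketch of how the constants above can be derived (not part of the patch):

// {3, 2, 1, 0} -> 0x1B (full reverse), {1, 0, 3, 2} -> 0xB1 (swap adjacent pairs)
constexpr unsigned ShfImm(unsigned e0, unsigned e1, unsigned e2, unsigned e3) {
  return e0 | (e1 << 2) | (e2 << 4) | (e3 << 6);
}
static_assert(ShfImm(3, 2, 1, 0) == 0x1B, "reverse");
static_assert(ShfImm(1, 0, 3, 2) == 0xB1, "pair swap");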
+ case kMipsS8x16InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [23, 7, ... 17, 1, 16, 0]
+ __ ilvr_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 25, 9, 24, 8]
+ __ ilvl_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 28, ... 6, 4, 2, 0]
+ __ pckev_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 29, ... 7, 5, 3, 1]
+ __ pckod_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 14, ... 18, 2, 16, 0]
+ __ ilvev_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 19, 3, 17, 1]
+ __ ilvod_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x16Concat: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kMipsS8x16Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(3));
+ __ insert_w(dst, 1, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(4));
+ __ insert_w(dst, 2, kScratchReg);
+ __ li(kScratchReg, i.InputInt32(5));
+ __ insert_w(dst, 3, kScratchReg);
+ __ vshf_b(dst, src1, src0);
+ break;
+ }
+ case kMipsS8x8Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
+ // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
+ // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
+ __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
+ __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
+ break;
+ }
+ case kMipsS8x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMipsS8x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMipsI32x4SConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMipsI32x4SConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMipsI32x4UConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4UConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8SConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMipsI16x8SConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMipsI16x8SConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_w(kSimd128ScratchReg, src0, 15);
+ __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
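The narrowing conversions above clamp each wide lane with sat_s/sat_u and then pack the low halves of both inputs with pckev. Scalar sketch of the signed 32-to-16 clamp (illustrative only):

#include <cstdint>

int16_t SaturateS16(int32_t v) {
  if (v > INT16_MAX) return INT16_MAX;
  if (v < INT16_MIN) return INT16_MIN;
  return static_cast<int16_t>(v);
}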
+ case kMipsI16x8UConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_w(kSimd128ScratchReg, src0, 15);
+ __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI16x8UConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8UConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16SConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_h(kSimd128ScratchReg, src0, 7);
+ __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI8x16UConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_h(kSimd128ScratchReg, src0, 7);
+ __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsF32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
+ __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
+ __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
+ __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_d(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMipsI16x8AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_w(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
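The AddHoriz cases implement a pairwise (horizontal) add: adjacent lanes of each input are summed and the results packed, src0's sums first. Scalar sketch for the i32x4 case (illustrative only):

void i32x4_add_horiz(int32_t dst[4], const int32_t a[4], const int32_t b[4]) {
  dst[0] = a[0] + a[1];
  dst[1] = a[2] + a[3];
  dst[2] = b[0] + b[1];
  dst[3] = b[2] + b[3];
}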
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2134,11 +2745,11 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
-void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ masm->
+#define __ tasm->
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
@@ -2227,14 +2838,14 @@ void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ masm()->
+#define __ tasm()->
}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -2277,14 +2888,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ tasm()->isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2303,7 +2914,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
bool frame_elided = !frame_access_state()->has_frame();
auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
// Assembles boolean materializations after an instruction.
@@ -2325,7 +2936,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo32(i.InputOperand(1).immediate())) {
+ base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros32(i.InputOperand(1).immediate());
__ Ext(result, i.InputRegister(0), pos, 1);
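When the tested immediate is a power of two, the boolean materialization above reduces to extracting that single bit; Ext(result, src, pos, 1) is the scalar equivalent of this sketch (illustrative only):

unsigned TestSingleBit(unsigned x, unsigned pos) {
  return (x >> pos) & 1u;  // 1 iff bit |pos| of x is set
}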
@@ -2504,7 +3115,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2563,7 +3174,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
@@ -2672,13 +3283,13 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
UNREACHABLE();
break;
case Constant::kFloat64:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
@@ -2717,7 +3328,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- __ Move(dst, src.ToFloat64());
+ __ Move(dst, src.ToFloat64().value());
if (destination->IsFPStackSlot()) {
__ Sdc1(dst, g.ToMemOperand(destination));
}
@@ -2875,11 +3486,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     // Block trampoline pool emission for duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index f80fae9340..3a2a873e48 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -138,6 +138,7 @@ namespace compiler {
V(MipsI32x4ExtractLane) \
V(MipsI32x4ReplaceLane) \
V(MipsI32x4Add) \
+ V(MipsI32x4AddHoriz) \
V(MipsI32x4Sub) \
V(MipsF32x4Splat) \
V(MipsF32x4ExtractLane) \
@@ -154,12 +155,12 @@ namespace compiler {
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
- V(MipsS32x4Select) \
V(MipsF32x4Abs) \
V(MipsF32x4Neg) \
V(MipsF32x4RecipApprox) \
V(MipsF32x4RecipSqrtApprox) \
V(MipsF32x4Add) \
+ V(MipsF32x4AddHoriz) \
V(MipsF32x4Sub) \
V(MipsF32x4Mul) \
V(MipsF32x4Max) \
@@ -171,10 +172,10 @@ namespace compiler {
V(MipsI32x4SConvertF32x4) \
V(MipsI32x4UConvertF32x4) \
V(MipsI32x4Neg) \
- V(MipsI32x4LtS) \
- V(MipsI32x4LeS) \
- V(MipsI32x4LtU) \
- V(MipsI32x4LeU) \
+ V(MipsI32x4GtS) \
+ V(MipsI32x4GeS) \
+ V(MipsI32x4GtU) \
+ V(MipsI32x4GeU) \
V(MipsI16x8Splat) \
V(MipsI16x8ExtractLane) \
V(MipsI16x8ReplaceLane) \
@@ -184,6 +185,7 @@ namespace compiler {
V(MipsI16x8ShrU) \
V(MipsI16x8Add) \
V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8AddHoriz) \
V(MipsI16x8Sub) \
V(MipsI16x8SubSaturateS) \
V(MipsI16x8Mul) \
@@ -191,22 +193,89 @@ namespace compiler {
V(MipsI16x8MinS) \
V(MipsI16x8Eq) \
V(MipsI16x8Ne) \
- V(MipsI16x8LtS) \
- V(MipsI16x8LeS) \
+ V(MipsI16x8GtS) \
+ V(MipsI16x8GeS) \
V(MipsI16x8AddSaturateU) \
V(MipsI16x8SubSaturateU) \
V(MipsI16x8MaxU) \
V(MipsI16x8MinU) \
- V(MipsI16x8LtU) \
- V(MipsI16x8LeU) \
+ V(MipsI16x8GtU) \
+ V(MipsI16x8GeU) \
V(MipsI8x16Splat) \
V(MipsI8x16ExtractLane) \
V(MipsI8x16ReplaceLane) \
V(MipsI8x16Neg) \
V(MipsI8x16Shl) \
V(MipsI8x16ShrS) \
- V(MipsS16x8Select) \
- V(MipsS8x16Select)
+ V(MipsI8x16Add) \
+ V(MipsI8x16AddSaturateS) \
+ V(MipsI8x16Sub) \
+ V(MipsI8x16SubSaturateS) \
+ V(MipsI8x16Mul) \
+ V(MipsI8x16MaxS) \
+ V(MipsI8x16MinS) \
+ V(MipsI8x16Eq) \
+ V(MipsI8x16Ne) \
+ V(MipsI8x16GtS) \
+ V(MipsI8x16GeS) \
+ V(MipsI8x16ShrU) \
+ V(MipsI8x16AddSaturateU) \
+ V(MipsI8x16SubSaturateU) \
+ V(MipsI8x16MaxU) \
+ V(MipsI8x16MinU) \
+ V(MipsI8x16GtU) \
+ V(MipsI8x16GeU) \
+ V(MipsS128And) \
+ V(MipsS128Or) \
+ V(MipsS128Xor) \
+ V(MipsS128Not) \
+ V(MipsS128Select) \
+ V(MipsS1x4AnyTrue) \
+ V(MipsS1x4AllTrue) \
+ V(MipsS1x8AnyTrue) \
+ V(MipsS1x8AllTrue) \
+ V(MipsS1x16AnyTrue) \
+ V(MipsS1x16AllTrue) \
+ V(MipsS32x4InterleaveRight) \
+ V(MipsS32x4InterleaveLeft) \
+ V(MipsS32x4PackEven) \
+ V(MipsS32x4PackOdd) \
+ V(MipsS32x4InterleaveEven) \
+ V(MipsS32x4InterleaveOdd) \
+ V(MipsS32x4Shuffle) \
+ V(MipsS16x8InterleaveRight) \
+ V(MipsS16x8InterleaveLeft) \
+ V(MipsS16x8PackEven) \
+ V(MipsS16x8PackOdd) \
+ V(MipsS16x8InterleaveEven) \
+ V(MipsS16x8InterleaveOdd) \
+ V(MipsS16x4Reverse) \
+ V(MipsS16x2Reverse) \
+ V(MipsS8x16InterleaveRight) \
+ V(MipsS8x16InterleaveLeft) \
+ V(MipsS8x16PackEven) \
+ V(MipsS8x16PackOdd) \
+ V(MipsS8x16InterleaveEven) \
+ V(MipsS8x16InterleaveOdd) \
+ V(MipsS8x16Shuffle) \
+ V(MipsS8x16Concat) \
+ V(MipsS8x8Reverse) \
+ V(MipsS8x4Reverse) \
+ V(MipsS8x2Reverse) \
+ V(MipsMsaLd) \
+ V(MipsMsaSt) \
+ V(MipsI32x4SConvertI16x8Low) \
+ V(MipsI32x4SConvertI16x8High) \
+ V(MipsI32x4UConvertI16x8Low) \
+ V(MipsI32x4UConvertI16x8High) \
+ V(MipsI16x8SConvertI8x16Low) \
+ V(MipsI16x8SConvertI8x16High) \
+ V(MipsI16x8SConvertI32x4) \
+ V(MipsI16x8UConvertI32x4) \
+ V(MipsI16x8UConvertI8x16Low) \
+ V(MipsI16x8UConvertI8x16High) \
+ V(MipsI8x16SConvertI16x8) \
+ V(MipsI8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1058833a43..9d5a2d95a1 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -294,11 +294,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaLd;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -382,11 +381,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsSw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaSt;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -732,20 +730,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
MipsOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMipsShl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
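The three constant-multiply strength reductions above, as arithmetic (sketch; the third branch is truncated by this hunk but presumably completes with a subtract):

#include <cstdint>

// For a positive constant c:
//   c == 2^k      : x * c == x << k                 (kMipsShl)
//   c == 2^k + 1  : x * c == (x << k) + x           (kMipsLsa: shift-and-add)
//   c == 2^k - 1  : x * c == (x << k) - x           (shift into a temp, then subtract)
uint32_t MulByConstant(uint32_t x, unsigned k, int form) {
  switch (form) {
    case 0:  return x << k;        // c = 1u << k
    case 1:  return (x << k) + x;  // c = (1u << k) + 1
    default: return (x << k) - x;  // c = (1u << k) - 1
  }
}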
@@ -1234,11 +1232,10 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kMipsUldc1;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaLd;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1287,11 +1284,10 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsUsw;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kMipsMsaSt;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1340,9 +1336,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1683,6 +1676,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 9 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
@@ -1690,7 +1684,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
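The new kMaxTableSwitchValueRange bound caps the size of an emitted jump table: a switch whose case values span more than 2 << 16 == 131072 entries now always takes the ArchLookupSwitch path, regardless of the space/time comparison. A small check of the constant (sketch):

#include <cstddef>

constexpr size_t kMaxTableSwitchValueRange = 2 << 16;
static_assert(kMaxTableSwitchValueRange == 131072, "2 << 16 entries");
// e.g. value_range == 200000 > 131072  =>  lookup switch is emitted.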
@@ -1941,316 +1936,348 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRR(this, kMipsI32x4Splat, node);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- VisitRRI(this, kMipsI32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI32x4Add(Node* node) {
- VisitRRR(this, kMipsI32x4Add, node);
-}
-
-void InstructionSelector::VisitI32x4Sub(Node* node) {
- VisitRRR(this, kMipsI32x4Sub, node);
-}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \
+ V(F32x4Abs, kMipsF32x4Abs) \
+ V(F32x4Neg, kMipsF32x4Neg) \
+ V(F32x4RecipApprox, kMipsF32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMipsF32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \
+ V(I32x4Neg, kMipsI32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \
+ V(I16x8Neg, kMipsI16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \
+ V(I8x16Neg, kMipsI8x16Neg) \
+ V(S128Not, kMipsS128Not) \
+ V(S1x4AnyTrue, kMipsS1x4AnyTrue) \
+ V(S1x4AllTrue, kMipsS1x4AllTrue) \
+ V(S1x8AnyTrue, kMipsS1x8AnyTrue) \
+ V(S1x8AllTrue, kMipsS1x8AllTrue) \
+ V(S1x16AnyTrue, kMipsS1x16AnyTrue) \
+ V(S1x16AllTrue, kMipsS1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kMipsF32x4Add) \
+ V(F32x4AddHoriz, kMipsF32x4AddHoriz) \
+ V(F32x4Sub, kMipsF32x4Sub) \
+ V(F32x4Mul, kMipsF32x4Mul) \
+ V(F32x4Max, kMipsF32x4Max) \
+ V(F32x4Min, kMipsF32x4Min) \
+ V(F32x4Eq, kMipsF32x4Eq) \
+ V(F32x4Ne, kMipsF32x4Ne) \
+ V(F32x4Lt, kMipsF32x4Lt) \
+ V(F32x4Le, kMipsF32x4Le) \
+ V(I32x4Add, kMipsI32x4Add) \
+ V(I32x4AddHoriz, kMipsI32x4AddHoriz) \
+ V(I32x4Sub, kMipsI32x4Sub) \
+ V(I32x4Mul, kMipsI32x4Mul) \
+ V(I32x4MaxS, kMipsI32x4MaxS) \
+ V(I32x4MinS, kMipsI32x4MinS) \
+ V(I32x4MaxU, kMipsI32x4MaxU) \
+ V(I32x4MinU, kMipsI32x4MinU) \
+ V(I32x4Eq, kMipsI32x4Eq) \
+ V(I32x4Ne, kMipsI32x4Ne) \
+ V(I32x4GtS, kMipsI32x4GtS) \
+ V(I32x4GeS, kMipsI32x4GeS) \
+ V(I32x4GtU, kMipsI32x4GtU) \
+ V(I32x4GeU, kMipsI32x4GeU) \
+ V(I16x8Add, kMipsI16x8Add) \
+ V(I16x8AddSaturateS, kMipsI16x8AddSaturateS) \
+ V(I16x8AddSaturateU, kMipsI16x8AddSaturateU) \
+ V(I16x8AddHoriz, kMipsI16x8AddHoriz) \
+ V(I16x8Sub, kMipsI16x8Sub) \
+ V(I16x8SubSaturateS, kMipsI16x8SubSaturateS) \
+ V(I16x8SubSaturateU, kMipsI16x8SubSaturateU) \
+ V(I16x8Mul, kMipsI16x8Mul) \
+ V(I16x8MaxS, kMipsI16x8MaxS) \
+ V(I16x8MinS, kMipsI16x8MinS) \
+ V(I16x8MaxU, kMipsI16x8MaxU) \
+ V(I16x8MinU, kMipsI16x8MinU) \
+ V(I16x8Eq, kMipsI16x8Eq) \
+ V(I16x8Ne, kMipsI16x8Ne) \
+ V(I16x8GtS, kMipsI16x8GtS) \
+ V(I16x8GeS, kMipsI16x8GeS) \
+ V(I16x8GtU, kMipsI16x8GtU) \
+ V(I16x8GeU, kMipsI16x8GeU) \
+ V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \
+ V(I8x16Add, kMipsI8x16Add) \
+ V(I8x16AddSaturateS, kMipsI8x16AddSaturateS) \
+ V(I8x16AddSaturateU, kMipsI8x16AddSaturateU) \
+ V(I8x16Sub, kMipsI8x16Sub) \
+ V(I8x16SubSaturateS, kMipsI8x16SubSaturateS) \
+ V(I8x16SubSaturateU, kMipsI8x16SubSaturateU) \
+ V(I8x16Mul, kMipsI8x16Mul) \
+ V(I8x16MaxS, kMipsI8x16MaxS) \
+ V(I8x16MinS, kMipsI8x16MinS) \
+ V(I8x16MaxU, kMipsI8x16MaxU) \
+ V(I8x16MinU, kMipsI8x16MinU) \
+ V(I8x16Eq, kMipsI8x16Eq) \
+ V(I8x16Ne, kMipsI8x16Ne) \
+ V(I8x16GtS, kMipsI8x16GtS) \
+ V(I8x16GeS, kMipsI8x16GeS) \
+ V(I8x16GtU, kMipsI8x16GtU) \
+ V(I8x16GeU, kMipsI8x16GeU) \
+ V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \
+ V(S128And, kMipsS128And) \
+ V(S128Or, kMipsS128Or) \
+ V(S128Xor, kMipsS128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
}
-void InstructionSelector::VisitS1x4Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x8Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x16Zero(Node* node) {
- MipsOperandGenerator g(this);
- Emit(kMipsS128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitF32x4Splat(Node* node) {
- VisitRR(this, kMipsF32x4Splat, node);
-}
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
- VisitRRI(this, kMipsF32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsF32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- VisitRR(this, kMipsF32x4SConvertI32x4, node);
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- VisitRR(this, kMipsF32x4UConvertI32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Mul(Node* node) {
- VisitRRR(this, kMipsI32x4Mul, node);
-}
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) {
- VisitRRR(this, kMipsI32x4MaxS, node);
-}
-
-void InstructionSelector::VisitI32x4MinS(Node* node) {
- VisitRRR(this, kMipsI32x4MinS, node);
-}
-
-void InstructionSelector::VisitI32x4Eq(Node* node) {
- VisitRRR(this, kMipsI32x4Eq, node);
-}
-
-void InstructionSelector::VisitI32x4Ne(Node* node) {
- VisitRRR(this, kMipsI32x4Ne, node);
-}
-
-void InstructionSelector::VisitI32x4Shl(Node* node) {
- VisitRRI(this, kMipsI32x4Shl, node);
-}
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) {
- VisitRRI(this, kMipsI32x4ShrS, node);
-}
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) {
- VisitRRI(this, kMipsI32x4ShrU, node);
-}
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) {
- VisitRRR(this, kMipsI32x4MaxU, node);
-}
-
-void InstructionSelector::VisitI32x4MinU(Node* node) {
- VisitRRR(this, kMipsI32x4MinU, node);
-}
-
-void InstructionSelector::VisitS32x4Select(Node* node) {
- VisitRRRR(this, kMipsS32x4Select, node);
-}
-
-void InstructionSelector::VisitF32x4Abs(Node* node) {
- VisitRR(this, kMipsF32x4Abs, node);
-}
-
-void InstructionSelector::VisitF32x4Neg(Node* node) {
- VisitRR(this, kMipsF32x4Neg, node);
-}
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
- VisitRR(this, kMipsF32x4RecipApprox, node);
-}
-
-void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
- VisitRR(this, kMipsF32x4RecipSqrtApprox, node);
-}
-
-void InstructionSelector::VisitF32x4Add(Node* node) {
- VisitRRR(this, kMipsF32x4Add, node);
-}
-
-void InstructionSelector::VisitF32x4Sub(Node* node) {
- VisitRRR(this, kMipsF32x4Sub, node);
-}
-
-void InstructionSelector::VisitF32x4Mul(Node* node) {
- VisitRRR(this, kMipsF32x4Mul, node);
-}
-
-void InstructionSelector::VisitF32x4Max(Node* node) {
- VisitRRR(this, kMipsF32x4Max, node);
-}
-
-void InstructionSelector::VisitF32x4Min(Node* node) {
- VisitRRR(this, kMipsF32x4Min, node);
-}
-
-void InstructionSelector::VisitF32x4Eq(Node* node) {
- VisitRRR(this, kMipsF32x4Eq, node);
-}
-
-void InstructionSelector::VisitF32x4Ne(Node* node) {
- VisitRRR(this, kMipsF32x4Ne, node);
-}
-
-void InstructionSelector::VisitF32x4Lt(Node* node) {
- VisitRRR(this, kMipsF32x4Lt, node);
-}
-
-void InstructionSelector::VisitF32x4Le(Node* node) {
- VisitRRR(this, kMipsF32x4Le, node);
-}
-
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRR(this, kMipsI32x4SConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- VisitRR(this, kMipsI32x4UConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Neg(Node* node) {
- VisitRR(this, kMipsI32x4Neg, node);
-}
-
-void InstructionSelector::VisitI32x4LtS(Node* node) {
- VisitRRR(this, kMipsI32x4LtS, node);
-}
-
-void InstructionSelector::VisitI32x4LeS(Node* node) {
- VisitRRR(this, kMipsI32x4LeS, node);
-}
-
-void InstructionSelector::VisitI32x4LtU(Node* node) {
- VisitRRR(this, kMipsI32x4LtU, node);
-}
-
-void InstructionSelector::VisitI32x4LeU(Node* node) {
- VisitRRR(this, kMipsI32x4LeU, node);
-}
-
-void InstructionSelector::VisitI16x8Splat(Node* node) {
- VisitRR(this, kMipsI16x8Splat, node);
-}
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
- VisitRRI(this, kMipsI16x8ExtractLane, node);
-}
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI16x8ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) {
- VisitRR(this, kMipsI16x8Neg, node);
-}
-
-void InstructionSelector::VisitI16x8Shl(Node* node) {
- VisitRRI(this, kMipsI16x8Shl, node);
-}
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) {
- VisitRRI(this, kMipsI16x8ShrS, node);
-}
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) {
- VisitRRI(this, kMipsI16x8ShrU, node);
-}
-
-void InstructionSelector::VisitI16x8Add(Node* node) {
- VisitRRR(this, kMipsI16x8Add, node);
-}
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- VisitRRR(this, kMipsI16x8AddSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) {
- VisitRRR(this, kMipsI16x8Sub, node);
-}
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- VisitRRR(this, kMipsI16x8SubSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Mul(Node* node) {
- VisitRRR(this, kMipsI16x8Mul, node);
-}
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) {
- VisitRRR(this, kMipsI16x8MaxS, node);
-}
-
-void InstructionSelector::VisitI16x8MinS(Node* node) {
- VisitRRR(this, kMipsI16x8MinS, node);
-}
-
-void InstructionSelector::VisitI16x8Eq(Node* node) {
- VisitRRR(this, kMipsI16x8Eq, node);
-}
-
-void InstructionSelector::VisitI16x8Ne(Node* node) {
- VisitRRR(this, kMipsI16x8Ne, node);
-}
-
-void InstructionSelector::VisitI16x8LtS(Node* node) {
- VisitRRR(this, kMipsI16x8LtS, node);
-}
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kMips##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
-void InstructionSelector::VisitI16x8LeS(Node* node) {
- VisitRRR(this, kMipsI16x8LeS, node);
-}
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kMips##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- VisitRRR(this, kMipsI16x8AddSaturateU, node);
-}
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kMips##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- VisitRRR(this, kMipsI16x8SubSaturateU, node);
-}
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
-void InstructionSelector::VisitI16x8MaxU(Node* node) {
- VisitRRR(this, kMipsI16x8MaxU, node);
-}
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kMips##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
-void InstructionSelector::VisitI16x8MinU(Node* node) {
- VisitRRR(this, kMipsI16x8MinU, node);
-}
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
-void InstructionSelector::VisitI16x8LtU(Node* node) {
- VisitRRR(this, kMipsI16x8LtU, node);
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kMipsS128Select, node);
}
-void InstructionSelector::VisitI16x8LeU(Node* node) {
- VisitRRR(this, kMipsI16x8LeU, node);
-}
+namespace {
-void InstructionSelector::VisitI8x16Splat(Node* node) {
- VisitRR(this, kMipsI8x16Splat, node);
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
}
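(Aside, illustrative only and not part of the patch.) The matcher above accepts a byte shuffle only when every group of four byte indices starts on a word boundary and is consecutive; a minimal standalone sketch of that rule, using hypothetical helper and sample names, looks like:

#include <cstdint>
#include <cstdio>

// Same rule as TryMatch32x4Shuffle: each 4-byte group must be word-aligned
// and consecutive; the group then collapses to a single word index.
static bool Match32x4(const uint8_t* shuffle, uint8_t* shuffle32x4) {
  for (int i = 0; i < 4; ++i) {
    if (shuffle[i * 4] % 4 != 0) return false;
    for (int j = 1; j < 4; ++j) {
      if (shuffle[i * 4 + j] != shuffle[i * 4 + j - 1] + 1) return false;
    }
    shuffle32x4[i] = shuffle[i * 4] / 4;
  }
  return true;
}

int main() {
  const uint8_t swap_words[16] = {4,  5,  6,  7,  0, 1, 2,  3,
                                  12, 13, 14, 15, 8, 9, 10, 11};
  const uint8_t odd_even[16] = {0, 2, 4, 6, 8, 10, 12, 14,
                                1, 3, 5, 7, 9, 11, 13, 15};
  uint8_t w[4];
  std::printf("%d\n", Match32x4(swap_words, w));  // 1, word shuffle {1,0,3,2}
  std::printf("%d\n", Match32x4(odd_even, w));    // 0, bytes not consecutive
}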
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
- VisitRRI(this, kMipsI8x16ExtractLane, node);
+// Tries to match byte shuffle to concatenate (sldi) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
}
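(A hedged sketch, not from the patch.) A concatenation shuffle reads sixteen consecutive indices starting at some offset, wrapping from the first source into the second; the hypothetical helper below builds exactly the pattern TryMatchConcat accepts:

#include <cstdint>

// For offset 5 this produces 5,6,...,15 followed by 16,...,20: the last
// eleven bytes of the first source and the first five of the second, which
// the kMipsS8x16Concat path above handles with an immediate offset of 5.
void MakeConcatShuffle(uint8_t offset, uint8_t shuffle[16]) {
  for (int i = 0; i < 16; ++i) shuffle[i] = static_cast<uint8_t>(offset + i);
}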
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
- VisitRRIR(this, kMipsI8x16ReplaceLane, node);
-}
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
-void InstructionSelector::VisitI8x16Neg(Node* node) {
- VisitRR(this, kMipsI8x16Neg, node);
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kMipsS32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMipsS32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kMipsS32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kMipsS32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kMipsS32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMipsS32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kMipsS16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kMipsS16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kMipsS16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kMipsS16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kMipsS16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kMipsS16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kMipsS8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kMipsS8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kMipsS8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kMipsS8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kMipsS8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kMipsS8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
}
-void InstructionSelector::VisitI8x16Shl(Node* node) {
- VisitRRI(this, kMipsI8x16Shl, node);
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kUnaryShuffleMask;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kUnaryShuffleMask;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kUnaryShuffleMask;
+ }
+ }
+ return mask;
}
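(A brief illustrative note, stated as my reading of the code above.) In the unary case both operands are the same value, so an index of 17 selects the same byte as an index of 1; masking indices with kSimd128Size - 1 is what lets the later matchers treat the two spellings identically:

#include <cstdint>

// Illustrative only: folding a second-operand lane index onto the equivalent
// first-operand lane, as the unary-shuffle mask returned above permits.
constexpr uint8_t Canonical(uint8_t index) { return index & 0x0f; }
static_assert(Canonical(17) == 1, "lane 17 of src1 is lane 1 of src0");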
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- VisitRRI(this, kMipsI8x16ShrS, node);
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; --i) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
}
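(Worked value, illustrative.) Pack4Lanes places the first masked index in the least-significant byte of the immediate, so the identity group {0, 1, 2, 3} with mask 0xff packs to 0x03020100; a hypothetical re-derivation:

#include <cstdint>

// Same fold as Pack4Lanes above: {0, 1, 2, 3} -> 0x03020100.
static int32_t Pack(const uint8_t* s, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) result = (result << 8) | (s[i] & mask);
  return result;
}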
-void InstructionSelector::VisitS16x8Select(Node* node) {
- VisitRRRR(this, kMipsS16x8Select, node);
-}
+} // namespace
-void InstructionSelector::VisitS8x16Select(Node* node) {
- VisitRRRR(this, kMipsS8x16Select, node);
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ uint8_t offset;
+ MipsOperandGenerator g(this);
+ if (TryMatchConcat(shuffle, mask, &offset)) {
+ Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(offset));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
// static
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index f4fb71d989..b9957732dc 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -14,8 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
@@ -81,11 +80,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kInt64:
return Operand(constant.ToInt64());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
@@ -96,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand(zero_reg);
}
Operand InputOperand(size_t index) {
@@ -120,7 +116,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
UNREACHABLE();
}
UNREACHABLE();
- return MemOperand(no_reg);
}
MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
@@ -233,7 +228,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -251,10 +247,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ Daddu(scratch1_, object_, index_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
__ Pop(ra);
}
@@ -268,15 +264,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
-#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
class ool_name final : public OutOfLineCode { \
public: \
ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
: OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
\
- void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
\
private: \
T const dst_; \
@@ -320,7 +317,6 @@ Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -334,7 +330,6 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -348,7 +343,6 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
@@ -382,30 +376,29 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
break;
}
UNREACHABLE();
- return kNoFPUCondition;
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
- __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
- Operand(zero_reg)); \
- } else { \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
+ do { \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
+ __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
+ Operand(zero_reg)); \
+ } else { \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
+ } \
} while (0)
-#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
- __ Or(kScratchReg, zero_reg, Operand(offset)); \
- __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
- __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
- } else { \
- __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
- } \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
+ do { \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
+ __ Or(kScratchReg, zero_reg, Operand(offset)); \
+ __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
+ __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
+ } else { \
+ __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
+ } \
} while (0)
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
@@ -553,27 +546,29 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
__ sync(); \
} while (0)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
- /* Move the result in the double result register. */ \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(tasm()->isolate()), 0, \
+ 2); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 1, kScratchReg); \
- __ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
- /* Move the result in the double result register. */ \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 1, kScratchReg); \
+ __ MovToFloatParameter(i.InputDoubleRegister(0)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(tasm()->isolate()), 0, \
+ 1); \
+ /* Move the result in the double result register. */ \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
@@ -616,7 +611,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -624,10 +619,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
+ tasm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
+ tasm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -636,13 +631,13 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -656,8 +651,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
@@ -674,8 +668,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (instr->InputAt(0)->IsImmediate()) {
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
} else {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Jump(at);
@@ -790,7 +783,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -893,8 +887,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
break;
}
case kIeee754Float64Sin:
@@ -1064,140 +1058,126 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dclz(i.OutputRegister(), i.InputRegister(0));
break;
case kMips64Ctz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Subu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ clz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x1F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x20);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ rotr(dst, src, 16);
+ __ wsbh(dst, dst);
+ __ bitswap(dst, dst);
+ __ Clz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Daddu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ Clz(dst, dst);
+ // Subtract number of leading zeroes from 32 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 32);
+ __ Subu(dst, kScratchReg, dst);
+ }
} break;
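(Portable sketch, illustrative rather than a definitive restatement of the patch.) The pre-r6 sequence uses the identity that ((x - 1) ^ x) & (x - 1) turns exactly the trailing-zero positions into ones, so 32 minus the leading-zero count of that value is ctz(x); in plain C++, with a naive software clz standing in for the Clz instruction:

#include <cstdint>

static int Ctz32(uint32_t x) {
  uint32_t m = ((x - 1) ^ x) & (x - 1);  // trailing-zero positions become ones
  int leading = 0;                       // naive clz(m)
  for (uint32_t probe = 0x80000000u; probe != 0 && (m & probe) == 0;
       probe >>= 1) {
    ++leading;
  }
  return 32 - leading;                   // Ctz32(8) == 3, Ctz32(0) == 32
}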
case kMips64Dctz: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- Label skip_for_zero;
- Label end;
- // Branch if the operand is zero
- __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
- // Find the number of bits before the last bit set to 1.
- __ Dsubu(reg2, zero_reg, i.InputRegister(0));
- __ And(reg2, reg2, i.InputRegister(0));
- __ dclz(reg2, reg2);
- // Get the number of bits after the last bit set to 1.
- __ li(reg1, 0x3F);
- __ Subu(i.OutputRegister(), reg1, reg2);
- __ Branch(&end);
- __ bind(&skip_for_zero);
- // If the operand is zero, return word length as the result.
- __ li(i.OutputRegister(), 0x40);
- __ bind(&end);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ if (kArchVariant == kMips64r6) {
+ // We don't have an instruction to count the number of trailing zeroes.
+ // Start by flipping the bits end-for-end so we can count the number of
+ // leading zeroes instead.
+ __ dsbh(dst, src);
+ __ dshd(dst, dst);
+ __ dbitswap(dst, dst);
+ __ dclz(dst, dst);
+ } else {
+ // Convert trailing zeroes to trailing ones, and bits to their left
+ // to zeroes.
+ __ Daddu(kScratchReg, src, -1);
+ __ Xor(dst, kScratchReg, src);
+ __ And(dst, dst, kScratchReg);
+ // Count number of leading zeroes.
+ __ dclz(dst, dst);
+ // Subtract number of leading zeroes from 64 to get number of trailing
+ // ones. Remember that the trailing ones were formerly trailing zeroes.
+ __ li(kScratchReg, 64);
+ __ Dsubu(dst, kScratchReg, dst);
+ }
} break;
case kMips64Popcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint32_t m1 = 0x55555555;
- uint32_t m2 = 0x33333333;
- uint32_t m4 = 0x0f0f0f0f;
- uint32_t m8 = 0x00ff00ff;
- uint32_t m16 = 0x0000ffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ dsrl(reg1, i.InputRegister(0), 1);
- __ And(reg2, i.InputRegister(0), at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ dsrl(reg2, reg1, 2);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ dsrl(reg2, reg1, 4);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ dsrl(reg2, reg1, 8);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m16);
- __ dsrl(reg2, reg1, 16);
- __ And(reg2, reg2, at);
- __ And(reg1, reg1, at);
- __ Daddu(i.OutputRegister(), reg1, reg2);
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // For comparison, for 32-bit quantities, this algorithm can be executed
+ // using 20 MIPS instructions (the calls to LoadConst32() generate two
+ // machine instructions each for the values being used in this algorithm).
+ // A(n unrolled) loop-based algorithm requires 25 instructions.
+ //
+ // For a 64-bit operand this can be performed in 24 instructions compared
+ // to a(n unrolled) loop based algorithm which requires 38 instructions.
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint32_t B0 = 0x55555555; // (T)~(T)0/3
+ uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ __ srl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Subu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ srl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Addu(kScratchReg, dst, kScratchReg);
+ __ srl(dst, kScratchReg, 4);
+ __ Addu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Mul(dst, dst, kScratchReg);
+ __ srl(dst, dst, shift);
} break;
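(Sketch for reference; the register-level sequence above computes the same value.) With T = uint32_t the commented bithacks formula instantiates directly as:

#include <cstdint>

// Direct transcription of the formula in the comment above for 32-bit values;
// B0, B1, B2 and the multiplier match the constants used by the MIPS sequence.
static uint32_t Popcount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // 2-bit sums
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // 4-bit sums
  v = (v + (v >> 4)) & 0x0f0f0f0fu;                  // 8-bit sums
  return (v * 0x01010101u) >> 24;                    // add the four byte sums
}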
case kMips64Dpopcnt: {
- Register reg1 = kScratchReg;
- Register reg2 = kScratchReg2;
- uint64_t m1 = 0x5555555555555555;
- uint64_t m2 = 0x3333333333333333;
- uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
- uint64_t m8 = 0x00ff00ff00ff00ff;
- uint64_t m16 = 0x0000ffff0000ffff;
- uint64_t m32 = 0x00000000ffffffff;
-
- // Put count of ones in every 2 bits into those 2 bits.
- __ li(at, m1);
- __ dsrl(reg1, i.InputRegister(0), 1);
- __ and_(reg2, i.InputRegister(0), at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 4 bits into those 4 bits.
- __ li(at, m2);
- __ dsrl(reg2, reg1, 2);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 8 bits into those 8 bits.
- __ li(at, m4);
- __ dsrl(reg2, reg1, 4);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 16 bits into those 16 bits.
- __ li(at, m8);
- __ dsrl(reg2, reg1, 8);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Put count of ones in every 32 bits into those 32 bits.
- __ li(at, m16);
- __ dsrl(reg2, reg1, 16);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(reg1, reg1, reg2);
-
- // Calculate total number of ones.
- __ li(at, m32);
- __ dsrl32(reg2, reg1, 0);
- __ and_(reg2, reg2, at);
- __ and_(reg1, reg1, at);
- __ Daddu(i.OutputRegister(), reg1, reg2);
+ Register src = i.InputRegister(0);
+ Register dst = i.OutputRegister();
+ uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ uint64_t B2 = 0x0f0f0f0f0f0f0f0fl; // (T)~(T)0/255*15
+ uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+ __ dsrl(kScratchReg, src, 1);
+ __ li(kScratchReg2, B0);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Dsubu(kScratchReg, src, kScratchReg);
+ __ li(kScratchReg2, B1);
+ __ And(dst, kScratchReg, kScratchReg2);
+ __ dsrl(kScratchReg, kScratchReg, 2);
+ __ And(kScratchReg, kScratchReg, kScratchReg2);
+ __ Daddu(kScratchReg, dst, kScratchReg);
+ __ dsrl(dst, kScratchReg, 4);
+ __ Daddu(dst, dst, kScratchReg);
+ __ li(kScratchReg2, B2);
+ __ And(dst, dst, kScratchReg2);
+ __ li(kScratchReg, value);
+ __ Dmul(dst, dst, kScratchReg);
+ __ dsrl32(dst, dst, shift);
} break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
@@ -1341,13 +1321,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64ModS: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
// TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(tasm()->isolate()), 0,
+ 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputSingleRegister());
break;
@@ -1382,26 +1363,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMips64MaddS:
- __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MaddD:
- __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MsubS:
- __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2),
- kScratchDoubleReg);
- break;
- case kMips64MsubD:
- __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2),
- kScratchDoubleReg);
- break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1414,12 +1375,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64ModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
- FrameScope scope(masm(), StackFrame::MANUAL);
+ FrameScope scope(tasm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
__ MovToFloatParameters(i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 0, 2);
+ __ CallCFunction(
+ ExternalReference::mod_two_doubles_operation(tasm()->isolate()), 0,
+ 2);
// Move the result in the double result register.
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
@@ -1974,24 +1936,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Zero: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(),
i.OutputSimd128Register());
break;
}
case kMips64I32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_w(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2001,31 +1963,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ FmoveLow(kScratchReg, i.InputSingleRegister(0));
__ fill_w(i.OutputSimd128Register(), kScratchReg);
break;
}
case kMips64F32x4ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1));
__ FmoveLow(i.OutputSingleRegister(), kScratchReg);
break;
}
case kMips64F32x4ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2036,213 +1998,211 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64F32x4SConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4UConvertI32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
case kMips64I32x4Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt5(1));
break;
}
case kMips64I32x4MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64S32x4Select:
- case kMips64S16x8Select:
- case kMips64S8x16Select: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ case kMips64S128Select: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Abs: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
break;
}
case kMips64F32x4RecipApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4RecipSqrtApprox: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64F32x4Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Max: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Min: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Lt: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64F32x4Le: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I32x4SConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4UConvertF32x4: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kMips64I32x4Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I32x4LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I32x4GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMips64I16x8Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I16x8ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I16x8ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2252,146 +2212,146 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I16x8Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I16x8Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8ShrU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt4(1));
break;
}
case kMips64I16x8Add: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8AddSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Sub: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSaturateS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Mul: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Eq: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8Ne: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register();
__ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
__ nor_v(dst, dst, dst);
break;
}
- case kMips64I16x8LtS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I16x8LeS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMips64I16x8AddSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8SubSaturateU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MaxU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kMips64I16x8MinU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kMips64I16x8LtU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
- case kMips64I16x8LeU: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
- __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ case kMips64I16x8GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
break;
}
case kMips64I8x16Splat: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
case kMips64I8x16ExtractLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kMips64I8x16ReplaceLane: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register src = i.InputSimd128Register(0);
Simd128Register dst = i.OutputSimd128Register();
if (!src.is(dst)) {
@@ -2401,24 +2361,636 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64I8x16Neg: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
__ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
i.InputSimd128Register(0));
break;
}
case kMips64I8x16Shl: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
case kMips64I8x16ShrS: {
- CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
__ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt3(1));
break;
}
+ case kMips64I8x16Add: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16AddSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Sub: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16SubSaturateS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Mul: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ mulv_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MaxS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MinS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Eq: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Ne: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMips64I8x16GtS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16GeS: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16ShrU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMips64I8x16AddSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16SubSaturateU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MaxU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16MinU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16GtU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16GeU: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64S128And: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Or: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Xor: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64S128Not: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64S1x4AnyTrue:
+ case kMips64S1x8AnyTrue:
+ case kMips64S1x16AnyTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_false;
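+      // The first li is placed in the branch delay slot, so it executes on
+      // both paths; the second li only runs on fall-through (some lane set).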
+ __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, 0); // branch delay slot
+ __ li(dst, -1);
+ __ bind(&all_false);
+ break;
+ }
+ case kMips64S1x4AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMips64S1x8AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMips64S1x16AllTrue: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
+ i.InputSimd128Register(0), USE_DELAY_SLOT);
+ __ li(dst, -1); // branch delay slot
+ __ li(dst, 0);
+ __ bind(&all_true);
+ break;
+ }
+ case kMips64MsaLd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ ld_b(i.OutputSimd128Register(), i.MemoryOperand());
+ break;
+ }
+ case kMips64MsaSt: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ st_b(i.InputSimd128Register(2), i.MemoryOperand());
+ break;
+ }
+ case kMips64S32x4InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [5, 1, 4, 0]
+ __ ilvr_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 6, 2]
+ __ ilvl_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 4, 2, 0]
+ __ pckev_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 5, 3, 1]
+ __ pckod_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [6, 2, 4, 0]
+ __ ilvev_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0]
+ // dst = [7, 3, 5, 1]
+ __ ilvod_w(dst, src1, src0);
+ break;
+ }
+ case kMips64S32x4Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int32_t shuffle = i.InputInt32(2);
+
+ if (src0.is(src1)) {
+        // Unary S32x4 shuffles are handled with the shf.w instruction.
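+        // e.g. a lane order of {1, 0, 3, 2} packs to i8 = 0b10110001 = 0xB1.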
+ uint32_t i8 = 0;
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0xff;
+ DCHECK(lane < 4);
+ i8 |= lane << (2 * i);
+ shuffle >>= 8;
+ }
+ __ shf_w(dst, src0, i8);
+ } else {
+        // For binary shuffles, use the vshf.w instruction.
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
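+        // Build the control vector: the immediate carries one word-lane index
+        // per byte; two zero-interleaves widen each byte to a 32-bit element,
+        // which vshf.w then uses to select words from the two sources.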
+ __ li(kScratchReg, i.InputInt32(2));
+ __ insert_w(dst, 0, kScratchReg);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(dst, kSimd128RegZero, dst);
+ __ ilvr_h(dst, kSimd128RegZero, dst);
+ __ vshf_w(dst, src1, src0);
+ }
+ break;
+ }
+ case kMips64S16x8InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [11, 3, 10, 2, 9, 1, 8, 0]
+ __ ilvr_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, 14, 6, 13, 5, 12, 4]
+ __ ilvl_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 12, 10, 8, 6, 4, 2, 0]
+ __ pckev_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 13, 11, 9, 7, 5, 3, 1]
+ __ pckod_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [14, 6, 12, 4, 10, 2, 8, 0]
+ __ ilvev_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x8InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0]
+ // dst = [15, 7, ... 11, 3, 9, 1]
+ __ ilvod_h(dst, src1, src0);
+ break;
+ }
+ case kMips64S16x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMips64S16x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+      // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMips64S8x16InterleaveRight: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [23, 7, ... 17, 1, 16, 0]
+ __ ilvr_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveLeft: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 25, 9, 24, 8]
+ __ ilvl_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16PackEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 28, ... 6, 4, 2, 0]
+ __ pckev_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16PackOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 29, ... 7, 5, 3, 1]
+ __ pckod_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveEven: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [30, 14, ... 18, 2, 16, 0]
+ __ ilvev_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16InterleaveOdd: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0]
+ // dst = [31, 15, ... 19, 3, 17, 1]
+ __ ilvod_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x16Concat: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kMips64S8x16Shuffle: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ if (dst.is(src0)) {
+ __ move_v(kSimd128ScratchReg, src0);
+ src0 = kSimd128ScratchReg;
+ } else if (dst.is(src1)) {
+ __ move_v(kSimd128ScratchReg, src1);
+ src1 = kSimd128ScratchReg;
+ }
+
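+      // The four packed immediates form the 16-byte control vector in dst;
+      // vshf.b then selects bytes from the two sources.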
+ int64_t control_low =
+ static_cast<int64_t>(i.InputInt32(3)) << 32 | i.InputInt32(2);
+ int64_t control_hi =
+ static_cast<int64_t>(i.InputInt32(5)) << 32 | i.InputInt32(4);
+ __ li(kScratchReg, control_low);
+ __ insert_d(dst, 0, kScratchReg);
+ __ li(kScratchReg, control_hi);
+ __ insert_d(dst, 1, kScratchReg);
+ __ vshf_b(dst, src1, src0);
+ break;
+ }
+ case kMips64S8x8Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+ // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7]
+ // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1
+ // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B
+ __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1);
+ __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B);
+ break;
+ }
+ case kMips64S8x4Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3]
+ // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B);
+ break;
+ }
+ case kMips64S8x2Reverse: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1]
+ // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1
+ __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1);
+ break;
+ }
+ case kMips64I32x4SConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
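+      // Duplicate the low halfwords into word lanes, then sign-extend each
+      // 16-bit value with an arithmetic shift-left/shift-right pair.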
+ __ ilvr_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMips64I32x4SConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_h(kSimd128ScratchReg, src, src);
+ __ slli_w(dst, kSimd128ScratchReg, 16);
+ __ srai_w(dst, dst, 16);
+ break;
+ }
+ case kMips64I32x4UConvertI16x8Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4UConvertI16x8High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I16x8SConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvr_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMips64I16x8SConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ ilvl_b(kSimd128ScratchReg, src, src);
+ __ slli_h(dst, kSimd128ScratchReg, 8);
+ __ srai_h(dst, dst, 8);
+ break;
+ }
+ case kMips64I16x8SConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
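+      // Saturate each word to the signed 16-bit range, then pack the even
+      // (low) halfwords of both saturated inputs into dst.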
+ __ sat_s_w(kSimd128ScratchReg, src0, 15);
+ __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I16x8UConvertI32x4: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_w(kSimd128ScratchReg, src0, 15);
+ __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I16x8UConvertI8x16Low: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I16x8UConvertI8x16High: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16SConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_s_h(kSimd128ScratchReg, src0, 7);
+ __ sat_s_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I8x16UConvertI16x8: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ __ sat_u_h(kSimd128ScratchReg, src0, 7);
+ __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch
+ __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64F32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
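+      // Swap adjacent lanes, add each swapped vector to its original, then
+      // pack the even lanes of both sums to form the horizontal add.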
+ __ shf_w(kSimd128ScratchReg, src0, 0xB1); // 2 3 0 1 : 10110001 : 0xB1
+ __ shf_w(kSimd128RegZero, src1, 0xB1); // kSimd128RegZero as scratch
+ __ fadd_w(kSimd128ScratchReg, kSimd128ScratchReg, src0);
+ __ fadd_w(kSimd128RegZero, kSimd128RegZero, src1);
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I32x4AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_d(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_d(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_w(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
+ case kMips64I16x8AddHoriz: {
+ CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
+ Simd128Register src0 = i.InputSimd128Register(0);
+ Simd128Register src1 = i.InputSimd128Register(1);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ hadd_s_w(kSimd128ScratchReg, src0, src0);
+ __ hadd_s_w(kSimd128RegZero, src1, src1); // kSimd128RegZero as scratch
+ __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg);
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2455,11 +3027,11 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
-void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
#undef __
-#define __ masm->
+#define __ tasm->
MipsOperandConverter i(gen, instr);
Condition cc = kNoCondition;
@@ -2554,7 +3126,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
}
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
#undef __
-#define __ masm()->
+#define __ tasm()->
}
// Assembles branches after an instruction.
@@ -2562,7 +3134,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
- AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
branch->fallthru);
}
@@ -2603,14 +3175,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ tasm()->isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(tasm()->isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2628,7 +3200,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
bool frame_elided = !frame_access_state()->has_frame();
auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
Label* tlabel = ool->entry();
- AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
}
// Assembles boolean materializations after an instruction.
@@ -2650,7 +3222,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
if (instr->InputAt(1)->IsImmediate() &&
- base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
+ base::bits::IsPowerOfTwo(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
__ Dext(result, i.InputRegister(0), pos, 1);
@@ -2838,9 +3410,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ tasm()->isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2895,7 +3467,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
if (shrink_slots > 0) {
@@ -3005,7 +3577,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kInt64:
if (RelocInfo::IsWasmPtrReference(src.rmode())) {
@@ -3016,7 +3588,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat64:
- __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ li(dst, Operand(src.ToExternalReference()));
@@ -3055,7 +3627,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- __ Move(dst, src.ToFloat64());
+ __ Move(dst, src.ToFloat64().value());
if (destination->IsFPStackSlot()) {
__ Sdc1(dst, g.ToMemOperand(destination));
}
@@ -3166,11 +3738,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 02cd4d5852..1b420d3819 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -85,10 +85,6 @@ namespace compiler {
V(Mips64SqrtD) \
V(Mips64MaxD) \
V(Mips64MinD) \
- V(Mips64MaddS) \
- V(Mips64MaddD) \
- V(Mips64MsubS) \
- V(Mips64MsubD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
@@ -172,6 +168,7 @@ namespace compiler {
V(Mips64I32x4ExtractLane) \
V(Mips64I32x4ReplaceLane) \
V(Mips64I32x4Add) \
+ V(Mips64I32x4AddHoriz) \
V(Mips64I32x4Sub) \
V(Mips64F32x4Splat) \
V(Mips64F32x4ExtractLane) \
@@ -188,12 +185,12 @@ namespace compiler {
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
- V(Mips64S32x4Select) \
V(Mips64F32x4Abs) \
V(Mips64F32x4Neg) \
V(Mips64F32x4RecipApprox) \
V(Mips64F32x4RecipSqrtApprox) \
V(Mips64F32x4Add) \
+ V(Mips64F32x4AddHoriz) \
V(Mips64F32x4Sub) \
V(Mips64F32x4Mul) \
V(Mips64F32x4Max) \
@@ -205,10 +202,10 @@ namespace compiler {
V(Mips64I32x4SConvertF32x4) \
V(Mips64I32x4UConvertF32x4) \
V(Mips64I32x4Neg) \
- V(Mips64I32x4LtS) \
- V(Mips64I32x4LeS) \
- V(Mips64I32x4LtU) \
- V(Mips64I32x4LeU) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
V(Mips64I16x8Splat) \
V(Mips64I16x8ExtractLane) \
V(Mips64I16x8ReplaceLane) \
@@ -218,6 +215,7 @@ namespace compiler {
V(Mips64I16x8ShrU) \
V(Mips64I16x8Add) \
V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddHoriz) \
V(Mips64I16x8Sub) \
V(Mips64I16x8SubSaturateS) \
V(Mips64I16x8Mul) \
@@ -225,22 +223,89 @@ namespace compiler {
V(Mips64I16x8MinS) \
V(Mips64I16x8Eq) \
V(Mips64I16x8Ne) \
- V(Mips64I16x8LtS) \
- V(Mips64I16x8LeS) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
V(Mips64I16x8AddSaturateU) \
V(Mips64I16x8SubSaturateU) \
V(Mips64I16x8MaxU) \
V(Mips64I16x8MinU) \
- V(Mips64I16x8LtU) \
- V(Mips64I16x8LeU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
V(Mips64I8x16Splat) \
V(Mips64I8x16ExtractLane) \
V(Mips64I8x16ReplaceLane) \
V(Mips64I8x16Neg) \
V(Mips64I8x16Shl) \
V(Mips64I8x16ShrS) \
- V(Mips64S16x8Select) \
- V(Mips64S8x16Select)
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16Mul) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSaturateU) \
+ V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S1x4AnyTrue) \
+ V(Mips64S1x4AllTrue) \
+ V(Mips64S1x8AnyTrue) \
+ V(Mips64S1x8AllTrue) \
+ V(Mips64S1x16AnyTrue) \
+ V(Mips64S1x16AllTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64S8x16Shuffle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index b4664d036a..1f26d5992b 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -411,10 +411,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaLd;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -491,10 +490,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Sd;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaSt;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -981,20 +979,20 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
Mips64OperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = m.right().Value();
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
@@ -1038,21 +1036,21 @@ void InstructionSelector::VisitInt64Mul(Node* node) {
Int64BinopMatcher m(node);
// TODO(dusmil): Add optimization for shifts larger than 32.
if (m.right().HasValue() && m.right().Value() > 0) {
- int32_t value = static_cast<int32_t>(m.right().Value());
- if (base::bits::IsPowerOfTwo32(value)) {
+ uint32_t value = static_cast<uint32_t>(m.right().Value());
+ if (base::bits::IsPowerOfTwo(value)) {
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value)));
return;
}
- if (base::bits::IsPowerOfTwo32(value - 1)) {
+ if (base::bits::IsPowerOfTwo(value - 1)) {
      // The Dlsa macro handles shift values that are out of bounds.
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
g.TempImmediate(WhichPowerOf2(value - 1)));
return;
}
- if (base::bits::IsPowerOfTwo32(value + 1)) {
+ if (base::bits::IsPowerOfTwo(value + 1)) {
InstructionOperand temp = g.TempRegister();
Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
g.UseRegister(m.left().node()),
@@ -1496,84 +1494,28 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- Emit(kMips64MaddS, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- Emit(kMips64MaddS, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- }
+ // Optimization with Madd.S(z, x, y) is intentionally removed.
+ // See explanation for madd_s in assembler-mips64.cc.
VisitRRR(this, kMips64AddS, node);
}
void InstructionSelector::VisitFloat64Add(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- Emit(kMips64MaddD, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- Emit(kMips64MaddD, g.DefineAsRegister(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
- }
+ // Optimization with Madd.D(z, x, y) is intentionally removed.
+ // See explanation for madd_d in assembler-mips64.cc.
VisitRRR(this, kMips64AddD, node);
}
void InstructionSelector::VisitFloat32Sub(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
- Float32BinopMatcher mleft(m.left().node());
- Emit(kMips64MsubS, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- }
+ // Optimization with Msub.S(z, x, y) is intentionally removed.
+ // See explanation for madd_s in assembler-mips64.cc.
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
- Mips64OperandGenerator g(this);
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y).
- Float64BinopMatcher mleft(m.left().node());
- Emit(kMips64MsubD, g.DefineAsRegister(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
- }
- }
+ // Optimization with Msub.D(z, x, y) is intentionally removed.
+ // See explanation for madd_d in assembler-mips64.cc.
VisitRRR(this, kMips64SubD, node);
}
@@ -1785,10 +1727,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Uld;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaLd;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1838,10 +1779,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Usd;
break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
+ case MachineRepresentation::kSimd128:
+ opcode = kMips64MsaSt;
+ break;
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1892,9 +1832,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1956,9 +1893,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged:
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -2387,6 +2321,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
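+  // Ranges wider than this bound fall back to ArchLookupSwitch below.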
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 10 + 2 * sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 2 + 2 * sw.case_count;
@@ -2394,7 +2329,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
@@ -2693,316 +2629,353 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-void InstructionSelector::VisitI32x4Splat(Node* node) {
- VisitRR(this, kMips64I32x4Splat, node);
-}
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) {
- VisitRRI(this, kMips64I32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI32x4Add(Node* node) {
- VisitRRR(this, kMips64I32x4Add, node);
-}
-
-void InstructionSelector::VisitI32x4Sub(Node* node) {
- VisitRRR(this, kMips64I32x4Sub, node);
-}
+#define SIMD_TYPE_LIST(V) \
+ V(F32x4) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4) \
+ V(F32x4Abs, kMips64F32x4Abs) \
+ V(F32x4Neg, kMips64F32x4Neg) \
+ V(F32x4RecipApprox, kMips64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox) \
+ V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4) \
+ V(I32x4Neg, kMips64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
+ V(I16x8Neg, kMips64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
+ V(I8x16Neg, kMips64I8x16Neg) \
+ V(S128Not, kMips64S128Not) \
+ V(S1x4AnyTrue, kMips64S1x4AnyTrue) \
+ V(S1x4AllTrue, kMips64S1x4AllTrue) \
+ V(S1x8AnyTrue, kMips64S1x8AnyTrue) \
+ V(S1x8AllTrue, kMips64S1x8AllTrue) \
+ V(S1x16AnyTrue, kMips64S1x16AnyTrue) \
+ V(S1x16AllTrue, kMips64S1x16AllTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kMips64F32x4Add) \
+ V(F32x4AddHoriz, kMips64F32x4AddHoriz) \
+ V(F32x4Sub, kMips64F32x4Sub) \
+ V(F32x4Mul, kMips64F32x4Mul) \
+ V(F32x4Max, kMips64F32x4Max) \
+ V(F32x4Min, kMips64F32x4Min) \
+ V(F32x4Eq, kMips64F32x4Eq) \
+ V(F32x4Ne, kMips64F32x4Ne) \
+ V(F32x4Lt, kMips64F32x4Lt) \
+ V(F32x4Le, kMips64F32x4Le) \
+ V(I32x4Add, kMips64I32x4Add) \
+ V(I32x4AddHoriz, kMips64I32x4AddHoriz) \
+ V(I32x4Sub, kMips64I32x4Sub) \
+ V(I32x4Mul, kMips64I32x4Mul) \
+ V(I32x4MaxS, kMips64I32x4MaxS) \
+ V(I32x4MinS, kMips64I32x4MinS) \
+ V(I32x4MaxU, kMips64I32x4MaxU) \
+ V(I32x4MinU, kMips64I32x4MinU) \
+ V(I32x4Eq, kMips64I32x4Eq) \
+ V(I32x4Ne, kMips64I32x4Ne) \
+ V(I32x4GtS, kMips64I32x4GtS) \
+ V(I32x4GeS, kMips64I32x4GeS) \
+ V(I32x4GtU, kMips64I32x4GtU) \
+ V(I32x4GeU, kMips64I32x4GeU) \
+ V(I16x8Add, kMips64I16x8Add) \
+ V(I16x8AddSaturateS, kMips64I16x8AddSaturateS) \
+ V(I16x8AddSaturateU, kMips64I16x8AddSaturateU) \
+ V(I16x8AddHoriz, kMips64I16x8AddHoriz) \
+ V(I16x8Sub, kMips64I16x8Sub) \
+ V(I16x8SubSaturateS, kMips64I16x8SubSaturateS) \
+ V(I16x8SubSaturateU, kMips64I16x8SubSaturateU) \
+ V(I16x8Mul, kMips64I16x8Mul) \
+ V(I16x8MaxS, kMips64I16x8MaxS) \
+ V(I16x8MinS, kMips64I16x8MinS) \
+ V(I16x8MaxU, kMips64I16x8MaxU) \
+ V(I16x8MinU, kMips64I16x8MinU) \
+ V(I16x8Eq, kMips64I16x8Eq) \
+ V(I16x8Ne, kMips64I16x8Ne) \
+ V(I16x8GtS, kMips64I16x8GtS) \
+ V(I16x8GeS, kMips64I16x8GeS) \
+ V(I16x8GtU, kMips64I16x8GtU) \
+ V(I16x8GeU, kMips64I16x8GeU) \
+ V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
+ V(I8x16Add, kMips64I8x16Add) \
+ V(I8x16AddSaturateS, kMips64I8x16AddSaturateS) \
+ V(I8x16AddSaturateU, kMips64I8x16AddSaturateU) \
+ V(I8x16Sub, kMips64I8x16Sub) \
+ V(I8x16SubSaturateS, kMips64I8x16SubSaturateS) \
+ V(I8x16SubSaturateU, kMips64I8x16SubSaturateU) \
+ V(I8x16Mul, kMips64I8x16Mul) \
+ V(I8x16MaxS, kMips64I8x16MaxS) \
+ V(I8x16MinS, kMips64I8x16MinS) \
+ V(I8x16MaxU, kMips64I8x16MaxU) \
+ V(I8x16MinU, kMips64I8x16MinU) \
+ V(I8x16Eq, kMips64I8x16Eq) \
+ V(I8x16Ne, kMips64I8x16Ne) \
+ V(I8x16GtS, kMips64I8x16GtS) \
+ V(I8x16GeS, kMips64I8x16GeS) \
+ V(I8x16GtU, kMips64I8x16GtU) \
+ V(I8x16GeU, kMips64I8x16GeU) \
+ V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
+ V(S128And, kMips64S128And) \
+ V(S128Or, kMips64S128Or) \
+ V(S128Xor, kMips64S128Xor)
void InstructionSelector::VisitS128Zero(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
}
-void InstructionSelector::VisitS1x4Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x8Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitS1x16Zero(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
-}
-
-void InstructionSelector::VisitF32x4Splat(Node* node) {
- VisitRR(this, kMips64F32x4Splat, node);
-}
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
- VisitRRI(this, kMips64F32x4ExtractLane, node);
-}
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64F32x4ReplaceLane, node);
-}
-
-void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
- VisitRR(this, kMips64F32x4SConvertI32x4, node);
-}
-
-void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
- VisitRR(this, kMips64F32x4UConvertI32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Mul(Node* node) {
- VisitRRR(this, kMips64I32x4Mul, node);
-}
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) {
- VisitRRR(this, kMips64I32x4MaxS, node);
-}
-
-void InstructionSelector::VisitI32x4MinS(Node* node) {
- VisitRRR(this, kMips64I32x4MinS, node);
-}
-
-void InstructionSelector::VisitI32x4Eq(Node* node) {
- VisitRRR(this, kMips64I32x4Eq, node);
-}
-
-void InstructionSelector::VisitI32x4Ne(Node* node) {
- VisitRRR(this, kMips64I32x4Ne, node);
-}
-
-void InstructionSelector::VisitI32x4Shl(Node* node) {
- VisitRRI(this, kMips64I32x4Shl, node);
-}
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) {
- VisitRRI(this, kMips64I32x4ShrS, node);
-}
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) {
- VisitRRI(this, kMips64I32x4ShrU, node);
-}
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) {
- VisitRRR(this, kMips64I32x4MaxU, node);
-}
-
-void InstructionSelector::VisitI32x4MinU(Node* node) {
- VisitRRR(this, kMips64I32x4MinU, node);
-}
-
-void InstructionSelector::VisitS32x4Select(Node* node) {
- VisitRRRR(this, kMips64S32x4Select, node);
-}
-
-void InstructionSelector::VisitF32x4Abs(Node* node) {
- VisitRR(this, kMips64F32x4Abs, node);
-}
-
-void InstructionSelector::VisitF32x4Neg(Node* node) {
- VisitRR(this, kMips64F32x4Neg, node);
-}
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
- VisitRR(this, kMips64F32x4RecipApprox, node);
-}
-
-void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
- VisitRR(this, kMips64F32x4RecipSqrtApprox, node);
-}
-
-void InstructionSelector::VisitF32x4Add(Node* node) {
- VisitRRR(this, kMips64F32x4Add, node);
-}
-
-void InstructionSelector::VisitF32x4Sub(Node* node) {
- VisitRRR(this, kMips64F32x4Sub, node);
-}
-
-void InstructionSelector::VisitF32x4Mul(Node* node) {
- VisitRRR(this, kMips64F32x4Mul, node);
-}
-
-void InstructionSelector::VisitF32x4Max(Node* node) {
- VisitRRR(this, kMips64F32x4Max, node);
-}
-
-void InstructionSelector::VisitF32x4Min(Node* node) {
- VisitRRR(this, kMips64F32x4Min, node);
-}
-
-void InstructionSelector::VisitF32x4Eq(Node* node) {
- VisitRRR(this, kMips64F32x4Eq, node);
-}
-
-void InstructionSelector::VisitF32x4Ne(Node* node) {
- VisitRRR(this, kMips64F32x4Ne, node);
-}
-
-void InstructionSelector::VisitF32x4Lt(Node* node) {
- VisitRRR(this, kMips64F32x4Lt, node);
-}
-
-void InstructionSelector::VisitF32x4Le(Node* node) {
- VisitRRR(this, kMips64F32x4Le, node);
-}
-
-void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRR(this, kMips64I32x4SConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
- VisitRR(this, kMips64I32x4UConvertF32x4, node);
-}
-
-void InstructionSelector::VisitI32x4Neg(Node* node) {
- VisitRR(this, kMips64I32x4Neg, node);
-}
-
-void InstructionSelector::VisitI32x4LtS(Node* node) {
- VisitRRR(this, kMips64I32x4LtS, node);
-}
-
-void InstructionSelector::VisitI32x4LeS(Node* node) {
- VisitRRR(this, kMips64I32x4LeS, node);
-}
-
-void InstructionSelector::VisitI32x4LtU(Node* node) {
- VisitRRR(this, kMips64I32x4LtU, node);
-}
-
-void InstructionSelector::VisitI32x4LeU(Node* node) {
- VisitRRR(this, kMips64I32x4LeU, node);
-}
-
-void InstructionSelector::VisitI16x8Splat(Node* node) {
- VisitRR(this, kMips64I16x8Splat, node);
-}
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
- VisitRRI(this, kMips64I16x8ExtractLane, node);
-}
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I16x8ReplaceLane, node);
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) {
- VisitRR(this, kMips64I16x8Neg, node);
-}
-
-void InstructionSelector::VisitI16x8Shl(Node* node) {
- VisitRRI(this, kMips64I16x8Shl, node);
-}
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) {
- VisitRRI(this, kMips64I16x8ShrS, node);
-}
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) {
- VisitRRI(this, kMips64I16x8ShrU, node);
-}
-
-void InstructionSelector::VisitI16x8Add(Node* node) {
- VisitRRR(this, kMips64I16x8Add, node);
-}
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- VisitRRR(this, kMips64I16x8AddSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) {
- VisitRRR(this, kMips64I16x8Sub, node);
-}
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- VisitRRR(this, kMips64I16x8SubSaturateS, node);
-}
-
-void InstructionSelector::VisitI16x8Mul(Node* node) {
- VisitRRR(this, kMips64I16x8Mul, node);
-}
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) {
- VisitRRR(this, kMips64I16x8MaxS, node);
-}
-
-void InstructionSelector::VisitI16x8MinS(Node* node) {
- VisitRRR(this, kMips64I16x8MinS, node);
-}
-
-void InstructionSelector::VisitI16x8Eq(Node* node) {
- VisitRRR(this, kMips64I16x8Eq, node);
-}
-
-void InstructionSelector::VisitI16x8Ne(Node* node) {
- VisitRRR(this, kMips64I16x8Ne, node);
-}
-
-void InstructionSelector::VisitI16x8LtS(Node* node) {
- VisitRRR(this, kMips64I16x8LtS, node);
-}
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kMips64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
-void InstructionSelector::VisitI16x8LeS(Node* node) {
- VisitRRR(this, kMips64I16x8LeS, node);
-}
+#define SIMD_VISIT_EXTRACT_LANE(Type) \
+ void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+ VisitRRI(this, kMips64##Type##ExtractLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- VisitRRR(this, kMips64I16x8AddSaturateU, node);
-}
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kMips64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- VisitRRR(this, kMips64I16x8SubSaturateU, node);
-}
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
-void InstructionSelector::VisitI16x8MaxU(Node* node) {
- VisitRRR(this, kMips64I16x8MaxU, node);
-}
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRI(this, kMips64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
-void InstructionSelector::VisitI16x8MinU(Node* node) {
- VisitRRR(this, kMips64I16x8MinU, node);
-}
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
-void InstructionSelector::VisitI16x8LtU(Node* node) {
- VisitRRR(this, kMips64I16x8LtU, node);
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kMips64S128Select, node);
}
-void InstructionSelector::VisitI16x8LeU(Node* node) {
- VisitRRR(this, kMips64I16x8LeU, node);
-}
+namespace {
-void InstructionSelector::VisitI8x16Splat(Node* node) {
- VisitRR(this, kMips64I8x16Splat, node);
+// Tries to match 8x16 byte shuffle to equivalent 32x4 word shuffle.
+bool TryMatch32x4Shuffle(const uint8_t* shuffle, uint8_t* shuffle32x4) {
+ static const int kLanes = 4;
+ static const int kLaneSize = 4;
+ for (int i = 0; i < kLanes; ++i) {
+ if (shuffle[i * kLaneSize] % kLaneSize != 0) return false;
+ for (int j = 1; j < kLaneSize; ++j) {
+ if (shuffle[i * kLaneSize + j] - shuffle[i * kLaneSize + j - 1] != 1)
+ return false;
+ }
+ shuffle32x4[i] = shuffle[i * kLaneSize] / kLaneSize;
+ }
+ return true;
}
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
- VisitRRI(this, kMips64I8x16ExtractLane, node);
+// Tries to match byte shuffle to concatenate (sldi) operation.
+bool TryMatchConcat(const uint8_t* shuffle, uint8_t mask, uint8_t* offset) {
+ uint8_t start = shuffle[0];
+ for (int i = 1; i < kSimd128Size - start; ++i) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return false;
+ }
+ uint8_t wrap = kSimd128Size;
+ for (int i = kSimd128Size - start; i < kSimd128Size; ++i, ++wrap) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return false;
+ }
+ *offset = start;
+ return true;
}
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
- VisitRRIR(this, kMips64I8x16ReplaceLane, node);
-}
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
-void InstructionSelector::VisitI8x16Neg(Node* node) {
- VisitRR(this, kMips64I8x16Neg, node);
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kMips64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMips64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kMips64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kMips64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kMips64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kMips64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kMips64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kMips64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kMips64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kMips64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kMips64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kMips64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kMips64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kMips64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kMips64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kMips64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kMips64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kMips64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kMips64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kMips64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kMips64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kMips64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kMips64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, uint8_t mask, ArchOpcode* opcode) {
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
}
-void InstructionSelector::VisitI8x16Shl(Node* node) {
- VisitRRI(this, kMips64I8x16Shl, node);
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node) {
+ static const int kUnaryShuffleMask = kSimd128Size - 1;
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = kUnaryShuffleMask;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < kSimd128Size; i++) {
+ if (shuffle[i] < kSimd128Size) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = kUnaryShuffleMask;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = kUnaryShuffleMask;
+ }
+ }
+ return mask;
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- VisitRRI(this, kMips64I8x16ShrS, node);
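+// Packs four consecutive (masked) lane indices into one int32, with
+// shuffle[0] in the least significant byte.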
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; --i) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
}
-void InstructionSelector::VisitS16x8Select(Node* node) {
- VisitRRRR(this, kMips64S16x8Select, node);
-}
+} // namespace
-void InstructionSelector::VisitS8x16Select(Node* node) {
- VisitRRRR(this, kMips64S8x16Select, node);
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ mask, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ uint8_t offset;
+ Mips64OperandGenerator g(this);
+ if (TryMatchConcat(shuffle, mask, &offset)) {
+ Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(offset));
+ return;
+ }
+ if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle32x4, mask)));
+ return;
+ }
+ Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
}
// static
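For reference, the fallback path above packs the 16 byte-lane indices into four 32-bit immediates via Pack4Lanes, and CanonicalizeShuffle returns a reduced index mask when both shuffle inputs are the same register. The following standalone sketch (not part of the patch) exercises just the packing and the unary-shuffle mask:

#include <cstdint>
#include <cstdio>

constexpr int kSimd128Size = 16;

// Mirrors Pack4Lanes above: packs four masked lane indices into an int32_t,
// least-significant byte first.
int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i] & mask;
  }
  return result;
}

int main() {
  // Lane indices selecting bytes 4..7 of the first source operand.
  const uint8_t lanes[4] = {4, 5, 6, 7};
  std::printf("%08x\n", static_cast<uint32_t>(Pack4Lanes(lanes, 0xff)));  // 07060504
  // For a unary shuffle the mask is kSimd128Size - 1 = 0x0f, so indices
  // 16..31 (nominally the second operand) wrap onto lanes 0..15.
  const uint8_t aliased[4] = {20, 21, 22, 23};
  std::printf("%08x\n", static_cast<uint32_t>(
                            Pack4Lanes(aliased, kSimd128Size - 1)));  // 07060504
  return 0;
}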
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index b62a8ccb4f..f4ef3273e6 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -105,7 +105,7 @@ class OperandSet {
}
static bool HasMixedFPReps(int reps) {
- return reps && !base::bits::IsPowerOfTwo32(reps);
+ return reps && !base::bits::IsPowerOfTwo(reps);
}
ZoneVector<InstructionOperand>* set_;
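Here reps is a bit set of the floating-point representations observed for overlapping operands, so it is "mixed" exactly when more than one bit is set; IsPowerOfTwo is simply the single-bit test. A minimal standalone equivalent, for illustration only:

#include <cassert>
#include <cstdint>

// A value is a power of two iff it is non-zero and has exactly one set bit.
bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// Mirrors HasMixedFPReps above: the representation bit set is "mixed" when it
// is non-empty and contains more than one bit.
bool HasMixedFPReps(uint32_t reps) { return reps != 0 && !IsPowerOfTwo(reps); }

int main() {
  assert(!HasMixedFPReps(0));                    // nothing seen
  assert(!HasMixedFPReps(1 << 2));               // a single representation
  assert(HasMixedFPReps((1 << 1) | (1 << 2)));   // e.g. Float32 and Float64
  return 0;
}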
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 550317d248..d1eecfe9fd 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -175,8 +175,7 @@ struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
return false;
}
Double value = Double(this->Value());
- return !value.IsInfinite() &&
- base::bits::IsPowerOfTwo64(value.Significand());
+ return !value.IsInfinite() && base::bits::IsPowerOfTwo(value.Significand());
}
};
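Conceptually, IsPowerOf2 above holds when the finite value's significand has a single bit set, i.e. the value is a power of two. A rough standalone check for positive finite doubles using frexp, offered as an approximation only (it does not mirror the sign or subnormal handling of V8's Double helper):

#include <cassert>
#include <cmath>

// A positive finite double is a power of two iff its normalized mantissa,
// as returned by frexp, is exactly 0.5 (a single leading bit).
bool IsPowerOfTwoDouble(double v) {
  if (!(v > 0) || std::isinf(v)) return false;
  int exponent;
  return std::frexp(v, &exponent) == 0.5;
}

int main() {
  assert(IsPowerOfTwoDouble(8.0));
  assert(IsPowerOfTwoDouble(0.25));
  assert(!IsPowerOfTwoDouble(12.0));
  assert(!IsPowerOfTwoDouble(0.0));
  return 0;
}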
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 02ab2ce044..55755649bc 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -138,7 +138,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
enum InferReceiverMapsResult {
kNoReceiverMaps, // No receiver maps inferred.
kReliableReceiverMaps, // Receiver maps can be trusted.
- kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
+ kUnreliableReceiverMaps // Receiver maps might have changed (side-effect),
+ // but instance type is reliable.
};
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index ce152b1512..c829a39e37 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -106,6 +106,7 @@
JS_COMPARE_BINOP_LIST(V) \
JS_BITWISE_BINOP_LIST(V) \
JS_ARITH_BINOP_LIST(V) \
+ V(JSHasInPrototypeChain) \
V(JSInstanceOf) \
V(JSOrdinaryHasInstance)
@@ -116,7 +117,8 @@
V(JSToName) \
V(JSToNumber) \
V(JSToObject) \
- V(JSToString)
+ V(JSToString) \
+ V(JSToPrimitiveToString)
#define JS_OTHER_UNOP_LIST(V) \
V(JSClassOf) \
@@ -161,9 +163,11 @@
#define JS_OTHER_OP_LIST(V) \
V(JSConstructForwardVarargs) \
V(JSConstruct) \
+ V(JSConstructWithArrayLike) \
V(JSConstructWithSpread) \
V(JSCallForwardVarargs) \
V(JSCall) \
+ V(JSCallWithArrayLike) \
V(JSCallWithSpread) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
@@ -177,6 +181,7 @@
V(JSGeneratorRestoreContinuation) \
V(JSGeneratorRestoreRegister) \
V(JSStackCheck) \
+ V(JSStringConcat) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -311,9 +316,12 @@
V(BooleanNot) \
V(StringCharAt) \
V(StringCharCodeAt) \
+ V(SeqStringCharCodeAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
V(StringIndexOf) \
+ V(StringToLowerCaseIntl) \
+ V(StringToUpperCaseIntl) \
V(CheckBounds) \
V(CheckIf) \
V(CheckMaps) \
@@ -321,10 +329,13 @@
V(CheckInternalizedString) \
V(CheckReceiver) \
V(CheckString) \
+ V(CheckSeqString) \
+ V(CheckNonEmptyString) \
+ V(CheckSymbol) \
V(CheckSmi) \
V(CheckHeapObject) \
V(CheckFloat64Hole) \
- V(CheckTaggedHole) \
+ V(CheckNotTaggedHole) \
V(ConvertTaggedHoleToUndefined) \
V(Allocate) \
V(LoadField) \
@@ -335,6 +346,7 @@
V(StoreBuffer) \
V(StoreElement) \
V(StoreTypedElement) \
+ V(TransitionAndStoreElement) \
V(ObjectIsDetectableCallable) \
V(ObjectIsNaN) \
V(ObjectIsNonCallable) \
@@ -350,7 +362,9 @@
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
- V(TransitionElementsKind)
+ V(TransitionElementsKind) \
+ V(LookupHashStorageIndex) \
+ V(LoadHashMapValue)
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
@@ -693,31 +707,12 @@
V(S128And) \
V(S128Or) \
V(S128Xor) \
- V(S32x4Shuffle) \
- V(S32x4Select) \
- V(S16x8Shuffle) \
- V(S16x8Select) \
+ V(S128Select) \
V(S8x16Shuffle) \
- V(S8x16Select) \
- V(S1x4Zero) \
- V(S1x4And) \
- V(S1x4Or) \
- V(S1x4Xor) \
- V(S1x4Not) \
V(S1x4AnyTrue) \
V(S1x4AllTrue) \
- V(S1x8Zero) \
- V(S1x8And) \
- V(S1x8Or) \
- V(S1x8Xor) \
- V(S1x8Not) \
V(S1x8AnyTrue) \
V(S1x8AllTrue) \
- V(S1x16Zero) \
- V(S1x16And) \
- V(S1x16Or) \
- V(S1x16Xor) \
- V(S1x16Not) \
V(S1x16AnyTrue) \
V(S1x16AllTrue)
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 35b24d8531..5a956dd9af 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -62,6 +62,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSLessThan:
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSHasInPrototypeChain:
case IrOpcode::kJSInstanceOf:
case IrOpcode::kJSOrdinaryHasInstance:
@@ -94,16 +95,20 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
// Call operations
case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSCallForwardVarargs:
case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
// Misc operations
+ case IrOpcode::kJSStringConcat:
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index e43cd5cdb0..2da48ca887 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -24,20 +24,16 @@ V8_INLINE N CheckRange(size_t val) {
} // namespace
-
-// static
-STATIC_CONST_MEMBER_DEFINITION const size_t Operator::kMaxControlOutputCount;
-
Operator::Operator(Opcode opcode, Properties properties, const char* mnemonic,
size_t value_in, size_t effect_in, size_t control_in,
size_t value_out, size_t effect_out, size_t control_out)
- : opcode_(opcode),
+ : mnemonic_(mnemonic),
+ opcode_(opcode),
properties_(properties),
- mnemonic_(mnemonic),
value_in_(CheckRange<uint32_t>(value_in)),
effect_in_(CheckRange<uint16_t>(effect_in)),
control_in_(CheckRange<uint16_t>(control_in)),
- value_out_(CheckRange<uint16_t>(value_out)),
+ value_out_(CheckRange<uint32_t>(value_out)),
effect_out_(CheckRange<uint8_t>(effect_out)),
control_out_(CheckRange<uint32_t>(control_out)) {}
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index dea94f0906..99e8461c86 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -95,9 +95,6 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
Properties properties() const { return properties_; }
- // TODO(bmeurer): Use bit fields below?
- static const size_t kMaxControlOutputCount = (1u << 16) - 1;
-
// TODO(titzer): convert return values here to size_t.
int ValueInputCount() const { return value_in_; }
int EffectInputCount() const { return effect_in_; }
@@ -136,13 +133,13 @@ class V8_EXPORT_PRIVATE Operator : public NON_EXPORTED_BASE(ZoneObject) {
virtual void PrintToImpl(std::ostream& os, PrintVerbosity verbose) const;
private:
+ const char* mnemonic_;
Opcode opcode_;
Properties properties_;
- const char* mnemonic_;
uint32_t value_in_;
uint16_t effect_in_;
uint16_t control_in_;
- uint16_t value_out_;
+ uint32_t value_out_;
uint8_t effect_out_;
uint32_t control_out_;
diff --git a/deps/v8/src/compiler/osr.h b/deps/v8/src/compiler/osr.h
index 1f562c56bf..075a9774a7 100644
--- a/deps/v8/src/compiler/osr.h
+++ b/deps/v8/src/compiler/osr.h
@@ -92,10 +92,6 @@ class Linkage;
class OsrHelper {
public:
explicit OsrHelper(CompilationInfo* info);
- // Only for testing.
- OsrHelper(size_t parameter_count, size_t stack_slot_count)
- : parameter_count_(parameter_count),
- stack_slot_count_(stack_slot_count) {}
// Deconstructs the artificial {OsrNormalEntry} and rewrites the graph so
// that only the path corresponding to {OsrLoopEntry} remains.
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 2b6ffe418c..99ef25f457 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -8,6 +8,8 @@
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/zone-stats.h"
#include "src/isolate.h"
+#include "src/objects/shared-function-info.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index bc8fd0cbe9..0bb242716f 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -9,6 +9,7 @@
#include <sstream>
#include "src/base/adapters.h"
+#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -17,6 +18,7 @@
#include "src/compiler/basic-block-instrumentor.h"
#include "src/compiler/branch-elimination.h"
#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/check-elimination.h"
#include "src/compiler/checkpoint-elimination.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
@@ -62,7 +64,6 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
-#include "src/compiler/tail-call-optimization.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
@@ -73,8 +74,8 @@
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/type-info.h"
#include "src/utils.h"
+#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
@@ -95,6 +96,8 @@ class PipelineData {
graph_zone_(graph_zone_scope_.zone()),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
PhaseScope scope(pipeline_statistics, "init pipeline data");
@@ -112,7 +115,7 @@ class PipelineData {
is_asm_ = info->shared_info()->asm_function();
}
- // For WASM compile entry point.
+ // For WebAssembly compile entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
PipelineStatistics* pipeline_statistics,
SourcePositionTable* source_positions,
@@ -132,6 +135,8 @@ class PipelineData {
jsgraph_(jsgraph),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
protected_instructions_(protected_instructions) {
@@ -152,6 +157,8 @@ class PipelineData {
schedule_(schedule),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
is_asm_ = false;
@@ -167,6 +174,8 @@ class PipelineData {
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(sequence->zone()),
sequence_(sequence),
+ codegen_zone_scope_(zone_stats_, ZONE_NAME),
+ codegen_zone_(codegen_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()) {
is_asm_ =
@@ -178,6 +187,7 @@ class PipelineData {
code_generator_ = nullptr;
DeleteRegisterAllocationZone();
DeleteInstructionZone();
+ DeleteCodegenZone();
DeleteGraphZone();
}
@@ -185,6 +195,7 @@ class PipelineData {
CompilationInfo* info() const { return info_; }
ZoneStats* zone_stats() const { return zone_stats_; }
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
+ OsrHelper* osr_helper() { return &(*osr_helper_); }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
@@ -231,6 +242,7 @@ class PipelineData {
void reset_schedule() { schedule_ = nullptr; }
Zone* instruction_zone() const { return instruction_zone_; }
+ Zone* codegen_zone() const { return codegen_zone_; }
InstructionSequence* sequence() const { return sequence_; }
Frame* frame() const { return frame_; }
@@ -276,6 +288,12 @@ class PipelineData {
instruction_zone_scope_.Destroy();
instruction_zone_ = nullptr;
sequence_ = nullptr;
+ }
+
+ void DeleteCodegenZone() {
+ if (codegen_zone_ == nullptr) return;
+ codegen_zone_scope_.Destroy();
+ codegen_zone_ = nullptr;
frame_ = nullptr;
}
@@ -307,7 +325,7 @@ class PipelineData {
if (descriptor != nullptr) {
fixed_frame_size = descriptor->CalculateFixedFrameSize();
}
- frame_ = new (instruction_zone()) Frame(fixed_frame_size);
+ frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
void InitializeRegisterAllocationData(const RegisterConfiguration* config,
@@ -318,9 +336,21 @@ class PipelineData {
sequence(), debug_name());
}
+ void InitializeOsrHelper() {
+ DCHECK(!osr_helper_.has_value());
+ osr_helper_.emplace(info());
+ }
+
+ void set_start_source_position(int position) {
+ DCHECK_EQ(start_source_position_, kNoSourcePosition);
+ start_source_position_ = position;
+ }
+
void InitializeCodeGenerator(Linkage* linkage) {
DCHECK_NULL(code_generator_);
- code_generator_ = new CodeGenerator(frame(), linkage, sequence(), info());
+ code_generator_ =
+ new CodeGenerator(codegen_zone(), frame(), linkage, sequence(), info(),
+ osr_helper_, start_source_position_);
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -347,6 +377,8 @@ class PipelineData {
bool compilation_failed_ = false;
bool verify_graph_ = false;
bool is_asm_ = false;
+ int start_source_position_ = kNoSourcePosition;
+ base::Optional<OsrHelper> osr_helper_;
Handle<Code> code_ = Handle<Code>::null();
CodeGenerator* code_generator_ = nullptr;
@@ -365,15 +397,21 @@ class PipelineData {
Schedule* schedule_ = nullptr;
// All objects in the following group of fields are allocated in
- // instruction_zone_. They are all set to nullptr when the instruction_zone_
+ // instruction_zone_. They are all set to nullptr when the instruction_zone_
// is destroyed.
ZoneStats::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_ = nullptr;
+
+ // All objects in the following group of fields are allocated in
+ // codegen_zone_. They are all set to nullptr when the codegen_zone_
+ // is destroyed.
+ ZoneStats::Scope codegen_zone_scope_;
+ Zone* codegen_zone_;
Frame* frame_ = nullptr;
// All objects in the following group of fields are allocated in
- // register_allocation_zone_. They are all set to nullptr when the zone is
+ // register_allocation_zone_. They are all set to nullptr when the zone is
// destroyed.
ZoneStats::Scope register_allocation_zone_scope_;
Zone* register_allocation_zone_;
@@ -469,6 +507,8 @@ class SourcePositionWrapper final : public Reducer {
: reducer_(reducer), table_(table) {}
~SourcePositionWrapper() final {}
+ const char* reducer_name() const override { return reducer_->reducer_name(); }
+
Reduction Reduce(Node* node) final {
SourcePosition const pos = table_->GetSourcePosition(node);
SourcePositionTable::Scope position(table_, pos);
@@ -576,6 +616,9 @@ class PipelineCompilationJob final : public CompilationJob {
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
+ // Registers weak object to optimized code dependencies.
+ void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
private:
std::unique_ptr<ParseInfo> parse_info_;
ZoneStats zone_stats_;
@@ -602,9 +645,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
info()->MarkAsLoopPeelingEnabled();
}
}
- if (info()->is_optimizing_from_bytecode() ||
- !info()->shared_info()->asm_function()) {
+ if (info()->is_optimizing_from_bytecode()) {
info()->MarkAsDeoptimizationEnabled();
+ if (FLAG_turbo_inlining) {
+ info()->MarkAsInliningEnabled();
+ }
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
@@ -612,13 +657,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
isolate()->heap()->one_closure_cell_map()) {
info()->MarkAsFunctionContextSpecializing();
}
- }
- if (!info()->is_optimizing_from_bytecode()) {
- if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
- } else if (FLAG_turbo_inlining) {
info()->MarkAsInliningEnabled();
}
+ data_.set_start_source_position(info()->shared_info()->start_position());
+
linkage_ = new (info()->zone())
Linkage(Linkage::ComputeIncoming(info()->zone(), info()));
@@ -627,6 +670,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
return AbortOptimization(kGraphBuildingFailed);
}
+ if (info()->is_osr()) data_.InitializeOsrHelper();
+
// Make sure that we have generated the maximal number of deopt entries.
// This is in order to avoid triggering the generation of deopt entries later
// during code assembly.
@@ -637,11 +682,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
if (!pipeline_.OptimizeGraph(linkage_)) return FAILED;
+ pipeline_.AssembleCode(linkage_);
return SUCCEEDED;
}
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
- pipeline_.AssembleCode(linkage_);
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
@@ -658,13 +703,70 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
return SUCCEEDED;
}
+namespace {
+
+void AddWeakObjectToCodeDependency(Isolate* isolate, Handle<HeapObject> object,
+ Handle<Code> code) {
+ Handle<WeakCell> cell = Code::WeakCellFor(code);
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*object)) {
+ heap->AddWeakNewSpaceObjectToCodeDependency(object, cell);
+ } else {
+ Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(object));
+ dep =
+ DependentCode::InsertWeakCode(dep, DependentCode::kWeakCodeGroup, cell);
+ heap->AddWeakObjectToCodeDependency(object, dep);
+ }
+}
+
+} // namespace
+
+void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
+ Handle<Code> code) {
+ DCHECK(code->is_optimized_code());
+ std::vector<Handle<Map>> maps;
+ std::vector<Handle<HeapObject>> objects;
+ {
+ DisallowHeapAllocation no_gc;
+ int const mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::CELL);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::CELL &&
+ code->IsWeakObjectInOptimizedCode(it.rinfo()->target_cell())) {
+ objects.push_back(handle(it.rinfo()->target_cell(), isolate()));
+ } else if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ code->IsWeakObjectInOptimizedCode(
+ it.rinfo()->target_object())) {
+ Handle<HeapObject> object(HeapObject::cast(it.rinfo()->target_object()),
+ isolate());
+ if (object->IsMap()) {
+ maps.push_back(Handle<Map>::cast(object));
+ } else {
+ objects.push_back(object);
+ }
+ }
+ }
+ }
+ for (Handle<Map> map : maps) {
+ if (map->dependent_code()->IsEmpty(DependentCode::kWeakCodeGroup)) {
+ isolate()->heap()->AddRetainedMap(map);
+ }
+ Map::AddDependentCode(map, DependentCode::kWeakCodeGroup, code);
+ }
+ for (Handle<HeapObject> object : objects) {
+ AddWeakObjectToCodeDependency(isolate(), object, code);
+ }
+ code->set_can_have_weak_objects(true);
+}
+
class PipelineWasmCompilationJob final : public CompilationJob {
public:
explicit PipelineWasmCompilationJob(
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
- bool allow_signalling_nan)
+ wasm::ModuleOrigin wasm_origin)
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
@@ -673,7 +775,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
source_positions, protected_insts),
pipeline_(&data_),
linkage_(descriptor),
- allow_signalling_nan_(allow_signalling_nan) {}
+ wasm_origin_(wasm_origin) {}
protected:
Status PrepareJobImpl() final;
@@ -688,7 +790,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
- bool allow_signalling_nan_;
+ wasm::ModuleOrigin wasm_origin_;
};
PipelineWasmCompilationJob::Status
@@ -706,15 +808,15 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
pipeline_.RunPrintAndVerify("Machine", true);
- if (FLAG_wasm_opt) {
+ if (FLAG_wasm_opt || wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin) {
PipelineData* data = &data_;
- PipelineRunScope scope(data, "WASM optimization");
+ PipelineRunScope scope(data, "Wasm optimization");
JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
- MachineOperatorReducer machine_reducer(data->jsgraph(),
- allow_signalling_nan_);
+ MachineOperatorReducer machine_reducer(
+ data->jsgraph(), wasm_origin_ == wasm::ModuleOrigin::kAsmJsOrigin);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
@@ -726,6 +828,7 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
+ pipeline_.AssembleCode(&linkage_);
return SUCCEEDED;
}
@@ -735,7 +838,6 @@ size_t PipelineWasmCompilationJob::AllocatedMemory() const {
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
- pipeline_.AssembleCode(&linkage_);
pipeline_.FinalizeCode();
return SUCCEEDED;
}
@@ -778,10 +880,8 @@ struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- bool succeeded = false;
-
if (data->info()->is_optimizing_from_bytecode()) {
- // Bytecode graph builder assumes deoptimziation is enabled.
+ // Bytecode graph builder assumes deoptimization is enabled.
DCHECK(data->info()->is_deoptimization_enabled());
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags;
if (data->info()->is_bailout_on_uninitialized()) {
@@ -792,16 +892,16 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector()),
data->info()->osr_ast_id(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), SourcePosition::kNotInlined, flags);
- succeeded = graph_builder.CreateGraph();
+ graph_builder.CreateGraph();
} else {
+ // AST-based graph builder assumes deoptimization is disabled.
+ DCHECK(!data->info()->is_deoptimization_enabled());
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), CallFrequency(1.0f),
data->loop_assignment(), data->source_positions());
- succeeded = graph_builder.CreateGraph();
- }
-
- if (!succeeded) {
- data->set_compilation_failed();
+ if (!graph_builder.CreateGraph()) {
+ data->set_compilation_failed();
+ }
}
}
};
@@ -841,7 +941,11 @@ struct InliningPhase {
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
+ CheckElimination check_elimination(&graph_reducer, data->jsgraph());
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
+ data->info()->is_bailout_on_uninitialized()
+ ? JSCallReducer::kBailoutOnUninitialized
+ : JSCallReducer::kNoFlags,
data->native_context(),
data->info()->dependencies());
JSContextSpecialization context_specialization(
@@ -875,6 +979,7 @@ struct InliningPhase {
: JSIntrinsicLowering::kDeoptimizationDisabled);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
+ AddReducer(data, &graph_reducer, &check_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
@@ -912,6 +1017,7 @@ struct UntyperPhase {
void Run(PipelineData* data, Zone* temp_zone) {
class RemoveTypeReducer final : public Reducer {
public:
+ const char* reducer_name() const override { return "RemoveTypeReducer"; }
Reduction Reduce(Node* node) final {
if (NodeProperties::IsTyped(node)) {
NodeProperties::RemoveType(node);
@@ -943,6 +1049,7 @@ struct OsrDeconstructionPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
+ // TODO(neis): Use data->osr_helper() here once AST graph builder is gone.
OsrHelper osr_helper(data->info());
osr_helper.Deconstruct(data->jsgraph(), data->common(), temp_zone);
}
@@ -1062,6 +1169,10 @@ struct ConcurrentOptimizationPrepPhase {
data->jsgraph()->CEntryStubConstant(2);
data->jsgraph()->CEntryStubConstant(3);
+ // TODO(turbofan): Remove this line once the Array constructor code
+ // is a proper builtin and no longer a CodeStub.
+ data->jsgraph()->ArrayConstructorStubConstant();
+
// This is needed for escape analysis.
NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
@@ -1234,13 +1345,11 @@ struct LateOptimizationPhase {
data->common(), data->machine());
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
- TailCallOptimization tco(data->common(), data->graph());
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &select_lowering);
- AddReducer(data, &graph_reducer, &tco);
AddReducer(data, &graph_reducer, &value_numbering);
graph_reducer.ReduceGraph();
}
@@ -1815,10 +1924,10 @@ CompilationJob* Pipeline::NewWasmCompilationJob(
CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
- bool allow_signalling_nan) {
- return new PipelineWasmCompilationJob(
- info, jsgraph, descriptor, source_positions, protected_instructions,
- allow_signalling_nan);
+ wasm::ModuleOrigin wasm_origin) {
+ return new PipelineWasmCompilationJob(info, jsgraph, descriptor,
+ source_positions,
+ protected_instructions, wasm_origin);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1936,6 +2045,7 @@ void PipelineImpl::AssembleCode(Linkage* linkage) {
data->BeginPhaseKind("code generation");
data->InitializeCodeGenerator(linkage);
Run<AssembleCodePhase>();
+ data->DeleteInstructionZone();
}
Handle<Code> PipelineImpl::FinalizeCode() {
@@ -2012,11 +2122,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
#endif
data->InitializeRegisterAllocationData(config, descriptor);
- if (info()->is_osr()) {
- AllowHandleDereference allow_deref;
- OsrHelper osr_helper(info());
- osr_helper.SetupFrame(data->frame());
- }
+ if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
Run<MeetRegisterConstraintsPhase>();
Run<ResolvePhisPhase>();
@@ -2048,6 +2154,14 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
+
+ // TODO(chromium:725559): remove this check once
+ // we understand the cause of the bug. We keep just the
+ // check at the end of the allocation.
+ if (verifier != nullptr) {
+ verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
+ }
+
Run<PopulateReferenceMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
@@ -2066,7 +2180,7 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
}
if (verifier != nullptr) {
- verifier->VerifyAssignment();
+ verifier->VerifyAssignment("End of regalloc pipeline.");
verifier->VerifyGapMoves();
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 624ef01ead..8748e3389a 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -22,6 +22,10 @@ namespace trap_handler {
struct ProtectedInstructionData;
} // namespace trap_handler
+namespace wasm {
+enum ModuleOrigin : uint8_t;
+} // namespace wasm
+
namespace compiler {
class CallDescriptor;
@@ -43,7 +47,7 @@ class Pipeline : public AllStatic {
SourcePositionTable* source_positions,
ZoneVector<trap_handler::ProtectedInstructionData>*
protected_instructions,
- bool wasm_origin);
+ wasm::ModuleOrigin wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index be10a67f24..fe7d3ab40d 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -9,14 +9,14 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/double.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
-
+#define __ tasm()->
#define kScratchReg r11
@@ -40,7 +40,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return LeaveRC;
}
UNREACHABLE();
- return LeaveRC;
}
bool CompareLogical() const {
@@ -54,7 +53,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return false;
}
UNREACHABLE();
- return false;
}
Operand InputImmediate(size_t index) {
@@ -63,11 +61,9 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
return Operand(constant.ToInt64());
@@ -78,7 +74,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
@@ -95,7 +90,6 @@ class PPCOperandConverter final : public InstructionOperandConverter {
return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
@@ -128,8 +122,8 @@ class OutOfLineLoadNAN32 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
- kScratchReg);
+ __ LoadDoubleLiteral(
+ result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
}
private:
@@ -143,8 +137,8 @@ class OutOfLineLoadNAN64 final : public OutOfLineCode {
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
- kScratchReg);
+ __ LoadDoubleLiteral(
+ result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
}
private:
@@ -177,7 +171,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -190,7 +185,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -209,8 +205,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ mflr(scratch1_);
__ Push(scratch1_);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ addi(scratch1_, object_, Operand(offset_immediate_));
} else {
@@ -218,10 +212,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ CallStub(&stub);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
} else {
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
@@ -239,6 +237,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
@@ -293,7 +292,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
} // namespace
@@ -431,28 +429,27 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
i.OutputRCBit()); \
} while (0)
-
-#define ASSEMBLE_FLOAT_MODULO() \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
- 0, 2); \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
@@ -462,12 +459,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
@@ -845,20 +842,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->Push((*pending_pushes)[0]);
+ tasm->Push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -869,18 +866,18 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -888,15 +885,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
+ tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -919,20 +916,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types are not supported.
@@ -940,15 +937,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -962,15 +959,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
switch (opcode) {
case kArchCallCodeObject: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
EnsureSpaceForLazyDeopt();
if (HasRegisterInput(instr, 0)) {
__ addi(ip, i.InputRegister(0),
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
} else {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -991,9 +987,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
DCHECK_EQ(LeaveRC, i.OutputRCBit());
frame_access_state()->ClearSPDelta();
@@ -1009,7 +1004,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallJSFunction: {
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
EnsureSpaceForLazyDeopt();
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
@@ -1122,7 +1117,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kArchStoreWithWriteBarrier: {
@@ -1383,8 +1379,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
LeaveOE, i.OutputRCBit());
} else {
- __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
- DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ if (is_int16(i.InputImmediate(1).immediate())) {
+ __ subi(i.OutputRegister(), i.InputRegister(0),
+ i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ } else {
+ __ mov(kScratchReg, i.InputImmediate(1));
+ __ sub(i.OutputRegister(), i.InputRegister(0), kScratchReg, LeaveOE,
+ i.OutputRCBit());
+ }
}
#if V8_TARGET_ARCH_PPC64
}
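The new branch above uses subi only when the immediate fits the signed 16-bit field of the PPC D-form encoding; otherwise the constant is moved into the scratch register and subtracted register-to-register. A standalone sketch of the is_int16 range predicate used above (the helper name here is illustrative):

#include <cassert>
#include <cstdint>

// True when the value fits in a signed 16-bit immediate field, as required
// by PPC D-form instructions such as subi/addi.
bool FitsInt16(int64_t value) { return value >= -32768 && value <= 32767; }

int main() {
  assert(FitsInt16(0x7fff));    // largest encodable immediate
  assert(FitsInt16(-0x8000));   // smallest encodable immediate
  assert(!FitsInt16(0x8000));   // needs the scratch-register path
  return 0;
}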
@@ -1556,8 +1559,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@@ -2079,14 +2082,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2219,12 +2222,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2296,7 +2299,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
@@ -2424,12 +2427,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -2455,31 +2456,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value;
+ Double value;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
// casting a single precision snan to double precision
// converts it to a qnan on ia32/x64
if (src.type() == Constant::kFloat32) {
- int32_t val = src.ToFloat32AsInt();
+ uint32_t val = src.ToFloat32AsInt();
if ((val & 0x7f800000) == 0x7f800000) {
- int64_t dval = static_cast<int64_t>(val);
+ uint64_t dval = static_cast<uint64_t>(val);
dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
- value = bit_cast<double, int64_t>(dval);
+ value = Double(dval);
} else {
- value = src.ToFloat32();
+ value = Double(static_cast<double>(src.ToFloat32()));
}
} else {
- int64_t val = src.ToFloat64AsInt();
- if ((val & 0x7f80000000000000) == 0x7f80000000000000) {
- value = bit_cast<double, int64_t>(val);
- } else {
- value = src.ToFloat64();
- }
+ value = Double(src.ToFloat64());
}
#else
- value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ value = src.type() == Constant::kFloat32
+ ? Double(static_cast<double>(src.ToFloat32()))
+ : Double(src.ToFloat64());
#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
if (destination->IsFPStackSlot()) {
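The host-arch special case above exists because widening a single-precision sNaN constant through an FPU conversion would quiet it, so when the float32 exponent field is all ones the double bit pattern is assembled manually. A small standalone sketch of that exponent-field test on the raw bits (illustrative; like the check above, it flags infinities as well as NaNs):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// True when the float's exponent field is all ones, i.e. the value is an
// infinity or a NaN; such constants must not be round-tripped through the
// FPU, because that would quiet a signalling NaN payload.
bool HasMaxExponent(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return (bits & 0x7f800000u) == 0x7f800000u;
}

int main() {
  assert(HasMaxExponent(std::numeric_limits<float>::quiet_NaN()));
  assert(HasMaxExponent(std::numeric_limits<float>::infinity()));
  assert(!HasMaxExponent(1.0f));
  assert(!HasMaxExponent(std::numeric_limits<float>::max()));
  return 0;
}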
@@ -2611,11 +2608,11 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
// Block trampoline pool emission for the duration of padding.
v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
+ tasm());
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
while (padding_size > 0) {
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index 640a7e439a..2b491f1b80 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -141,7 +141,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index ea88e81a05..ff7cde50fd 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -224,9 +224,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -336,9 +333,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -403,9 +397,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -454,9 +445,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1544,7 +1532,6 @@ static bool CompareLogical(FlagsContinuation* cont) {
return false;
}
UNREACHABLE();
- return false;
}
@@ -1837,6 +1824,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -1844,7 +1832,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
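The heuristic above weighs an estimated jump-table cost against a lookup-switch cost, and the new kMaxTableSwitchValueRange bound additionally rejects tables for very sparse value ranges. A worked instance of the comparison follows; lookup_time_cost is left as a parameter because that line falls outside the hunk shown, and the min_value check is omitted for brevity (illustrative only):

#include <cstddef>
#include <cstdio>

// Mirrors the cost comparison above: prefer a jump table when its weighted
// space+time estimate does not exceed the lookup-switch estimate and the
// value range is small enough to be worth a table at all.
bool PreferTableSwitch(size_t case_count, size_t value_range,
                       size_t lookup_time_cost) {
  const size_t kMaxTableSwitchValueRange = 2 << 16;
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         value_range <= kMaxTableSwitchValueRange;
}

int main() {
  // Dense switch: 10 cases covering values 0..9 -> table cost 23 never
  // exceeds the lookup cost, so the table wins.
  std::printf("%d\n", PreferTableSwitch(10, 10, 13));   // 1
  // Sparse switch: 4 cases spread over a range of 1000 -> lookup wins.
  std::printf("%d\n", PreferTableSwitch(4, 1000, 7));   // 0
  return 0;
}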
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
new file mode 100644
index 0000000000..417f541bca
--- /dev/null
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -0,0 +1,271 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/property-access-builder.h"
+
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/lookup.h"
+
+#include "src/field-index-inl.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph* PropertyAccessBuilder::graph() const { return jsgraph()->graph(); }
+
+Isolate* PropertyAccessBuilder::isolate() const { return jsgraph()->isolate(); }
+
+CommonOperatorBuilder* PropertyAccessBuilder::common() const {
+ return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const {
+ return jsgraph()->simplified();
+}
+
+namespace {
+
+bool HasOnlyNumberMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
+ }
+ return true;
+}
+
+bool HasOnlyStringMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (!map->IsStringMap()) return false;
+ }
+ return true;
+}
+
+bool HasOnlySequentialStringMaps(MapHandles const& maps) {
+ for (auto map : maps) {
+ if (!map->IsStringMap()) return false;
+ if (!StringShape(map->instance_type()).IsSequential()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
+ Node** receiver, Node** effect,
+ Node* control) {
+ if (HasOnlyStringMaps(maps)) {
+ if (HasOnlySequentialStringMaps(maps)) {
+ *receiver = *effect = graph()->NewNode(simplified()->CheckSeqString(),
+ *receiver, *effect, control);
+ } else {
+ // Monomorphic string access (ignoring the fact that there are multiple
+ // String maps).
+ *receiver = *effect = graph()->NewNode(simplified()->CheckString(),
+ *receiver, *effect, control);
+ }
+ return true;
+ }
+ return false;
+}
+
+bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
+ Node** receiver, Node** effect,
+ Node* control) {
+ if (HasOnlyNumberMaps(maps)) {
+ // Monomorphic number access (we also deal with Smis here).
+ *receiver = *effect = graph()->NewNode(simplified()->CheckNumber(),
+ *receiver, *effect, control);
+ return true;
+ }
+ return false;
+}
+
+Node* PropertyAccessBuilder::BuildCheckHeapObject(Node* receiver, Node** effect,
+ Node* control) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSTypeOf: {
+ return receiver;
+ }
+ default: {
+ return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, *effect, control);
+ }
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+void PropertyAccessBuilder::BuildCheckMaps(
+ Node* receiver, Node** effect, Node* control,
+ std::vector<Handle<Map>> const& receiver_maps) {
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue()) {
+ Handle<Map> receiver_map(m.Value()->map(), isolate());
+ if (receiver_map->is_stable()) {
+ for (Handle<Map> map : receiver_maps) {
+ if (map.is_identical_to(receiver_map)) {
+ dependencies()->AssumeMapStable(receiver_map);
+ return;
+ }
+ }
+ }
+ }
+ ZoneHandleSet<Map> maps;
+ CheckMapsFlags flags = CheckMapsFlag::kNone;
+ for (Handle<Map> map : receiver_maps) {
+ maps.insert(map, graph()->zone());
+ if (map->is_migration_target()) {
+ flags |= CheckMapsFlag::kTryMigrateInstance;
+ }
+ }
+ *effect = graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ *effect, control);
+}
+
+void PropertyAccessBuilder::AssumePrototypesStable(
+ Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
+ // Determine actual holder and perform prototype chain checks.
+ for (auto map : receiver_maps) {
+ // Perform the implicit ToObject for primitives here.
+ // Implemented according to ES6 section 7.3.2 GetV (V, P).
+ Handle<JSFunction> constructor;
+ if (Map::GetConstructorFunction(map, native_context)
+ .ToHandle(&constructor)) {
+ map = handle(constructor->initial_map(), holder->GetIsolate());
+ }
+ dependencies()->AssumePrototypeMapsStable(map, holder);
+ }
+}
+
+Node* PropertyAccessBuilder::ResolveHolder(
+ PropertyAccessInfo const& access_info, Node* receiver) {
+ Handle<JSObject> holder;
+ if (access_info.holder().ToHandle(&holder)) {
+ return jsgraph()->Constant(holder);
+ }
+ return receiver;
+}
+
+Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
+ Handle<Name> name, PropertyAccessInfo const& access_info, Node* receiver) {
+ // Optimize immutable property loads.
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value()->IsJSObject()) {
+ // TODO(ishell): Use something simpler like
+ //
+ // Handle<Object> value =
+ // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+ // Representation::Tagged(), field_index);
+ //
+ // here, once we have the immutable bit in the access_info.
+
+ // TODO(turbofan): Given that we already have the field_index here, we
+ // might be smarter in the future and not rely on the LookupIterator,
+ // but for now let's just do what Crankshaft does.
+ LookupIterator it(m.Value(), name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.state() == LookupIterator::DATA) {
+      bool is_readonly_non_configurable =
+          it.IsReadOnly() && !it.IsConfigurable();
+      if (is_readonly_non_configurable ||
+          (FLAG_track_constant_fields && access_info.IsDataConstantField())) {
+        Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+        if (!is_readonly_non_configurable) {
+          // It's necessary to add a dependency on the map that introduced
+          // the field.
+ DCHECK(access_info.IsDataConstantField());
+ DCHECK(!it.is_dictionary_holder());
+ Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+ dependencies()->AssumeFieldOwner(field_owner_map);
+ }
+ return value;
+ }
+ }
+ }
+ return nullptr;
+}
+
+Node* PropertyAccessBuilder::BuildLoadDataField(
+ Handle<Name> name, PropertyAccessInfo const& access_info, Node* receiver,
+ Node** effect, Node** control) {
+ DCHECK(access_info.IsDataField() || access_info.IsDataConstantField());
+ receiver = ResolveHolder(access_info, receiver);
+ if (Node* value =
+ TryBuildLoadConstantDataField(name, access_info, receiver)) {
+ return value;
+ }
+
+ FieldIndex const field_index = access_info.field_index();
+ Type* const field_type = access_info.field_type();
+ MachineRepresentation const field_representation =
+ access_info.field_representation();
+ Node* storage = receiver;
+ if (!field_index.is_inobject()) {
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectProperties()),
+ storage, *effect, *control);
+ }
+ FieldAccess field_access = {
+ kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ field_type,
+ MachineType::TypeForRepresentation(field_representation),
+ kFullWriteBarrier};
+ if (field_representation == MachineRepresentation::kFloat64) {
+ if (!field_index.is_inobject() || field_index.is_hidden_field() ||
+ !FLAG_unbox_double_fields) {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_index.offset(),
+ name,
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ storage = *effect = graph()->NewNode(
+ simplified()->LoadField(storage_access), storage, *effect, *control);
+ field_access.offset = HeapNumber::kValueOffset;
+ field_access.name = MaybeHandle<Name>();
+ }
+ } else if (field_representation == MachineRepresentation::kTaggedPointer) {
+ // Remember the map of the field value, if its map is stable. This is
+ // used by the LoadElimination to eliminate map checks on the result.
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ if (field_map->is_stable()) {
+ dependencies()->AssumeMapStable(field_map);
+ field_access.map = field_map;
+ }
+ }
+ }
+ Node* value = *effect = graph()->NewNode(
+ simplified()->LoadField(field_access), storage, *effect, *control);
+ return value;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/property-access-builder.h b/deps/v8/src/compiler/property-access-builder.h
new file mode 100644
index 0000000000..2774423b4c
--- /dev/null
+++ b/deps/v8/src/compiler/property-access-builder.h
@@ -0,0 +1,80 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
+#define V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
+
+#include <vector>
+
+#include "src/handles.h"
+#include "src/objects/map.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class CompilationDependencies;
+
+namespace compiler {
+
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class Node;
+class PropertyAccessInfo;
+class SimplifiedOperatorBuilder;
+
+class PropertyAccessBuilder {
+ public:
+ PropertyAccessBuilder(JSGraph* jsgraph, CompilationDependencies* dependencies)
+ : jsgraph_(jsgraph), dependencies_(dependencies) {}
+
+  // Builds the appropriate string check if all maps are string maps.
+ bool TryBuildStringCheck(MapHandles const& maps, Node** receiver,
+ Node** effect, Node* control);
+ // Builds a number check if all maps are number maps.
+ bool TryBuildNumberCheck(MapHandles const& maps, Node** receiver,
+ Node** effect, Node* control);
+
+ Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+ void BuildCheckMaps(Node* receiver, Node** effect, Node* control,
+ std::vector<Handle<Map>> const& receiver_maps);
+
+  // Adds stability dependencies on all prototypes of every map in
+  // {receiver_maps} up to (and including) the {holder}.
+ void AssumePrototypesStable(Handle<Context> native_context,
+ std::vector<Handle<Map>> const& receiver_maps,
+ Handle<JSObject> holder);
+
+ // Builds the actual load for data-field and data-constant-field
+ // properties (without heap-object or map checks).
+ Node* BuildLoadDataField(Handle<Name> name,
+ PropertyAccessInfo const& access_info,
+ Node* receiver, Node** effect, Node** control);
+
+ private:
+ JSGraph* jsgraph() const { return jsgraph_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Graph* graph() const;
+ Isolate* isolate() const;
+ CommonOperatorBuilder* common() const;
+ SimplifiedOperatorBuilder* simplified() const;
+
+ Node* TryBuildLoadConstantDataField(Handle<Name> name,
+ PropertyAccessInfo const& access_info,
+ Node* receiver);
+ // Returns a node with the holder for the property access described by
+ // {access_info}.
+ Node* ResolveHolder(PropertyAccessInfo const& access_info, Node* receiver);
+
+ JSGraph* jsgraph_;
+ CompilationDependencies* dependencies_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_PROPERTY_ACCESS_BUILDER_H_
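As a rough illustration only: a hypothetical reducer-side caller, sketching the
call sequence this class is designed for. The PropertyAccessBuilder calls match
the declarations in the header above; the surrounding function, its name and
its parameters are assumptions, and MapHandles is taken to be the
std::vector<Handle<Map>> alias used elsewhere in V8.

Node* ReduceNamedLoadSketch(JSGraph* jsgraph, CompilationDependencies* deps,
                            Handle<Name> name, MapHandles const& receiver_maps,
                            PropertyAccessInfo const& access_info,
                            Node* receiver, Node* effect, Node* control) {
  PropertyAccessBuilder access_builder(jsgraph, deps);
  // Prefer the cheap string/number checks; otherwise fall back to a
  // heap-object check followed by a map check against {receiver_maps}.
  if (!access_builder.TryBuildStringCheck(receiver_maps, &receiver, &effect,
                                          control) &&
      !access_builder.TryBuildNumberCheck(receiver_maps, &receiver, &effect,
                                          control)) {
    receiver = access_builder.BuildCheckHeapObject(receiver, &effect, control);
    access_builder.BuildCheckMaps(receiver, &effect, control, receiver_maps);
  }
  // Emit the data-field load itself (constant-folded where possible).
  return access_builder.BuildLoadDataField(name, access_info, receiver,
                                           &effect, &control);
}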
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 671aafe381..6134f934c7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -405,9 +405,17 @@ Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
}
RawMachineLabel::~RawMachineLabel() {
- // If this DCHECK fails, it means that the label has been bound but it's not
- // used, or the opposite. This would cause the register allocator to crash.
- DCHECK_EQ(bound_, used_);
+#if DEBUG
+ if (bound_ == used_) return;
+ std::stringstream str;
+ if (bound_) {
+ str << "A label has been bound but it's not used."
+ << "\n# label: " << *block_;
+ } else {
+ str << "A label has been used but it's not bound.";
+ }
+ FATAL(str.str().c_str());
+#endif // DEBUG
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 38feb8b751..666cdd4f58 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -27,7 +27,9 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckReceiver:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
- case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckSeqString:
+ case IrOpcode::kCheckNonEmptyString:
+ case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
@@ -123,9 +125,11 @@ namespace {
bool IsCompatibleCheck(Node const* a, Node const* b) {
if (a->op() != b->op()) {
- if (a->opcode() == IrOpcode::kCheckInternalizedString &&
- b->opcode() == IrOpcode::kCheckString) {
- // CheckInternalizedString(node) implies CheckString(node)
+ if (b->opcode() == IrOpcode::kCheckString &&
+ (a->opcode() == IrOpcode::kCheckInternalizedString ||
+ a->opcode() == IrOpcode::kCheckSeqString ||
+ a->opcode() == IrOpcode::kCheckNonEmptyString)) {
+ // Check[Internalized,Seq,NonEmpty]String(node) implies CheckString(node)
} else {
return false;
}
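Stated as a standalone predicate (plain C++, not the V8 data structures), the
implication this hunk extends is simply:

enum class Check { kString, kInternalizedString, kSeqString, kNonEmptyString };

// A dominating check {earlier} makes a later check {later} on the same node
// redundant when it is at least as strong.
bool Implies(Check earlier, Check later) {
  if (earlier == later) return true;
  // CheckInternalizedString / CheckSeqString / CheckNonEmptyString all imply
  // CheckString.
  return later == Check::kString && (earlier == Check::kInternalizedString ||
                                     earlier == Check::kSeqString ||
                                     earlier == Check::kNonEmptyString);
}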
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 786c9608df..05094a388e 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -16,6 +16,8 @@ class RedundancyElimination final : public AdvancedReducer {
RedundancyElimination(Editor* editor, Zone* zone);
~RedundancyElimination() final;
+ const char* reducer_name() const override { return "RedundancyElimination"; }
+
Reduction Reduce(Node* node) final;
private:
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index d589a9d371..d4614cd6f1 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -18,7 +18,6 @@ size_t OperandCount(const Instruction* instr) {
return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}
-
void VerifyEmptyGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -28,8 +27,7 @@ void VerifyEmptyGaps(const Instruction* instr) {
}
}
-
-void VerifyAllocatedGaps(const Instruction* instr) {
+void VerifyAllocatedGaps(const Instruction* instr, const char* caller_info) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
@@ -38,8 +36,10 @@ void VerifyAllocatedGaps(const Instruction* instr) {
if (moves == nullptr) continue;
for (const MoveOperands* move : *moves) {
if (move->IsRedundant()) continue;
- CHECK(move->source().IsAllocated() || move->source().IsConstant());
- CHECK(move->destination().IsAllocated());
+ CHECK_WITH_MSG(
+ move->source().IsAllocated() || move->source().IsConstant(),
+ caller_info);
+ CHECK_WITH_MSG(move->destination().IsAllocated(), caller_info);
}
}
}
@@ -114,13 +114,14 @@ void RegisterAllocatorVerifier::VerifyOutput(
constraint.virtual_register_);
}
-void RegisterAllocatorVerifier::VerifyAssignment() {
+void RegisterAllocatorVerifier::VerifyAssignment(const char* caller_info) {
+ caller_info_ = caller_info;
CHECK(sequence()->instructions().size() == constraints()->size());
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
const Instruction* instr = instr_constraint.instruction_;
// All gaps should be totally allocated at this point.
- VerifyAllocatedGaps(instr);
+ VerifyAllocatedGaps(instr, caller_info_);
const size_t operand_count = instr_constraint.operand_constaints_size_;
const OperandConstraint* op_constraints =
instr_constraint.operand_constraints_;
@@ -211,12 +212,12 @@ void RegisterAllocatorVerifier::CheckConstraint(
const InstructionOperand* op, const OperandConstraint* constraint) {
switch (constraint->type_) {
case kConstant:
- CHECK(op->IsConstant());
+ CHECK_WITH_MSG(op->IsConstant(), caller_info_);
CHECK_EQ(ConstantOperand::cast(op)->virtual_register(),
constraint->value_);
return;
case kImmediate: {
- CHECK(op->IsImmediate());
+ CHECK_WITH_MSG(op->IsImmediate(), caller_info_);
const ImmediateOperand* imm = ImmediateOperand::cast(op);
int value = imm->type() == ImmediateOperand::INLINE
? imm->inline_value()
@@ -225,40 +226,40 @@ void RegisterAllocatorVerifier::CheckConstraint(
return;
}
case kRegister:
- CHECK(op->IsRegister());
+ CHECK_WITH_MSG(op->IsRegister(), caller_info_);
return;
case kFPRegister:
- CHECK(op->IsFPRegister());
+ CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
return;
case kExplicit:
- CHECK(op->IsExplicit());
+ CHECK_WITH_MSG(op->IsExplicit(), caller_info_);
return;
case kFixedRegister:
case kRegisterAndSlot:
- CHECK(op->IsRegister());
+ CHECK_WITH_MSG(op->IsRegister(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedFPRegister:
- CHECK(op->IsFPRegister());
+ CHECK_WITH_MSG(op->IsFPRegister(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->register_code(), constraint->value_);
return;
case kFixedSlot:
- CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsStackSlot() || op->IsFPStackSlot(), caller_info_);
CHECK_EQ(LocationOperand::cast(op)->index(), constraint->value_);
return;
case kSlot:
- CHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsStackSlot() || op->IsFPStackSlot(), caller_info_);
CHECK_EQ(ElementSizeLog2Of(LocationOperand::cast(op)->representation()),
constraint->value_);
return;
case kNone:
- CHECK(op->IsRegister() || op->IsStackSlot());
+ CHECK_WITH_MSG(op->IsRegister() || op->IsStackSlot(), caller_info_);
return;
case kNoneFP:
- CHECK(op->IsFPRegister() || op->IsFPStackSlot());
+ CHECK_WITH_MSG(op->IsFPRegister() || op->IsFPStackSlot(), caller_info_);
return;
case kSameAsFirst:
- CHECK(false);
+ CHECK_WITH_MSG(false, caller_info_);
return;
}
}
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 989589e6fb..bc2de1a9f1 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -167,7 +167,7 @@ class RegisterAllocatorVerifier final : public ZoneObject {
RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
const InstructionSequence* sequence);
- void VerifyAssignment();
+ void VerifyAssignment(const char* caller_info);
void VerifyGapMoves();
private:
@@ -257,6 +257,8 @@ class RegisterAllocatorVerifier final : public ZoneObject {
Constraints constraints_;
ZoneMap<RpoNumber, BlockAssessments*> assessments_;
ZoneMap<RpoNumber, DelayedAssessments*> outstanding_assessments_;
+ // TODO(chromium:725559): remove after we understand this bug's root cause.
+ const char* caller_info_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
};
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index f9c076d951..f5d43761d2 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -88,15 +88,10 @@ int GetByteWidth(MachineRepresentation rep) {
return kDoubleSize;
case MachineRepresentation::kSimd128:
return kSimd128Size;
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
- return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return 0;
}
} // namespace
@@ -320,7 +315,6 @@ bool UsePosition::HintRegister(int* register_code) const {
}
}
UNREACHABLE();
- return false;
}
@@ -344,7 +338,6 @@ UsePositionHintType UsePosition::HintTypeForOperand(
break;
}
UNREACHABLE();
- return UsePositionHintType::kNone;
}
void UsePosition::SetHint(UsePosition* use_pos) {
@@ -1780,7 +1773,8 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
- cur_input->set_virtual_register(second_output->virtual_register());
+ *cur_input =
+ UnallocatedOperand(*cur_input, second_output->virtual_register());
MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
input_copy, *cur_input);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
@@ -3576,6 +3570,7 @@ void OperandAssigner::CommitAssignment() {
for (LiveRange* range = top_range; range != nullptr;
range = range->next()) {
InstructionOperand assigned = range->GetAssignedOperand();
+ DCHECK(!assigned.IsUnallocated());
range->ConvertUsesToOperand(assigned, spill_operand);
}
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 7698a90387..308bdfe3a3 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -5,6 +5,7 @@
#ifndef V8_REGISTER_ALLOCATOR_H_
#define V8_REGISTER_ALLOCATOR_H_
+#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/compiler/instruction.h"
#include "src/globals.h"
@@ -159,8 +160,8 @@ class LifetimePosition final {
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;
- // Code relies on kStep and kHalfStep being a power of two.
- STATIC_ASSERT(IS_POWER_OF_TWO(kHalfStep));
+ static_assert(base::bits::IsPowerOfTwo(kHalfStep),
+ "Code relies on kStep and kHalfStep being a power of two");
explicit LifetimePosition(int value) : value_(value) {}
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index f15df671cf..eee75bdd6f 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -42,7 +42,6 @@ const char* Truncation::description() const {
}
}
UNREACHABLE();
- return nullptr;
}
@@ -114,7 +113,6 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kAny;
}
UNREACHABLE();
- return false;
}
// static
@@ -196,14 +194,10 @@ Node* RepresentationChanger::GetRepresentationFor(
DCHECK(use_info.type_check() == TypeCheckKind::kNone);
return GetWord64RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
return node;
}
UNREACHABLE();
- return nullptr;
}
Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
@@ -677,22 +671,11 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
- } else if (output_rep == MachineRepresentation::kTaggedSigned) {
- if (output_type->Is(Type::Signed32())) {
+ } else if (IsAnyTagged(output_rep)) {
+ if (output_rep == MachineRepresentation::kTaggedSigned &&
+ output_type->Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedSignedToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
- } else {
- op = simplified()->TruncateTaggedToWord32();
- }
- } else {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kWord32);
- }
- } else if (output_rep == MachineRepresentation::kTagged ||
- output_rep == MachineRepresentation::kTaggedPointer) {
- if (output_type->Is(Type::Signed32())) {
+ } else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedSignedToInt32();
@@ -706,8 +689,12 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (use_info.truncation().IsUsedAsWord32()) {
if (output_type->Is(Type::NumberOrOddball())) {
op = simplified()->TruncateTaggedToWord32();
- } else if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
+ } else if (use_info.type_check() == TypeCheckKind::kNumber) {
+ op = simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumber);
+ } else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
+ op = simplified()->CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode::kNumberOrOddball);
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -729,8 +716,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
}
- } else {
- DCHECK_EQ(TypeCheckKind::kNumberOrOddball, use_info.type_check());
+ } else if (use_info.type_check() == TypeCheckKind::kNumber ||
+ use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
return node;
}
} else if (output_rep == MachineRepresentation::kWord8 ||
@@ -876,7 +863,6 @@ const Operator* RepresentationChanger::Int32OperatorFor(
return machine()->Int32LessThanOrEqual();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -893,7 +879,6 @@ const Operator* RepresentationChanger::Int32OverflowOperatorFor(
return simplified()->CheckedInt32Mod();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -911,7 +896,6 @@ const Operator* RepresentationChanger::TaggedSignedOperatorFor(
: machine()->Word64Equal();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -946,7 +930,6 @@ const Operator* RepresentationChanger::Uint32OperatorFor(
return machine()->Int32Mul();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -959,7 +942,6 @@ const Operator* RepresentationChanger::Uint32OverflowOperatorFor(
return simplified()->CheckedUint32Mod();
default:
UNREACHABLE();
- return nullptr;
}
}
@@ -1052,7 +1034,6 @@ const Operator* RepresentationChanger::Float64OperatorFor(
return machine()->Float64SilenceNaN();
default:
UNREACHABLE();
- return nullptr;
}
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index b4f3366d42..bd86cd34db 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -132,7 +132,6 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "HeapObject";
}
UNREACHABLE();
- return os;
}
// The {UseInfo} class is used to describe a use of an input of a node.
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f46740c9ae..4470b544fe 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
+#define __ tasm()->
#define kScratchReg ip
@@ -48,7 +48,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
return false;
}
UNREACHABLE();
- return false;
}
Operand InputImmediate(size_t index) {
@@ -57,11 +56,9 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kInt32:
return Operand(constant.ToInt32());
case Constant::kFloat32:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
- return Operand(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
return Operand(constant.ToInt64());
@@ -72,7 +69,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
break;
}
UNREACHABLE();
- return Operand::Zero();
}
MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
@@ -96,7 +92,6 @@ class S390OperandConverter final : public InstructionOperandConverter {
InputInt32(index + 2));
}
UNREACHABLE();
- return MemOperand(r0);
}
MemOperand MemoryOperand(AddressingMode* mode = NULL,
@@ -211,7 +206,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
Register value, Register scratch0, Register scratch1,
@@ -224,7 +220,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
- must_save_lr_(!gen->frame_access_state()->has_frame()) {}
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -242,15 +239,15 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
}
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
if (offset_.is(no_reg)) {
__ AddP(scratch1_, object_, Operand(offset_immediate_));
} else {
DCHECK_EQ(0, offset_immediate_);
__ AddP(scratch1_, object_, offset_);
}
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Pop(r14);
@@ -266,6 +263,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch1_;
RecordWriteMode const mode_;
bool must_save_lr_;
+ Zone* zone_;
};
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
@@ -335,7 +333,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
break;
}
UNREACHABLE();
- return kNoCondition;
}
#define GET_MEMOPERAND32(ret, fi) \
@@ -467,7 +464,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
static int nullInstr() {
UNREACHABLE();
- return -1;
}
template <int numOfOperand, class RType, class MType, class IType>
@@ -481,7 +477,6 @@ static inline int AssembleOp(Instruction* instr, RType r, MType m, IType i) {
return i();
} else {
UNREACHABLE();
- return -1;
}
}
@@ -626,26 +621,26 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ LoadlW(i.OutputRegister(), r0); \
} while (0)
-#define ASSEMBLE_FLOAT_MODULO() \
- do { \
- FrameScope scope(masm(), StackFrame::MANUAL); \
- __ PrepareCallCFunction(0, 2, kScratchReg); \
- __ MovToFloatParameters(i.InputDoubleRegister(0), \
- i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
- 0, 2); \
- __ MovFromFloatResult(i.OutputDoubleRegister()); \
+#define ASSEMBLE_FLOAT_MODULO() \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ __ PrepareCallCFunction(0, 2, kScratchReg); \
+ __ MovToFloatParameters(i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ __ CallCFunction( \
+ ExternalReference::mod_two_doubles_operation(__ isolate()), 0, 2); \
+ __ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 1, kScratchReg); \
__ MovToFloatParameter(i.InputDoubleRegister(0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 1); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
@@ -654,12 +649,12 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
/* and generate a CallAddress instruction instead. */ \
- FrameScope scope(masm(), StackFrame::MANUAL); \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
__ PrepareCallCFunction(0, 2, kScratchReg); \
__ MovToFloatParameters(i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 0, 2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 0, 2); \
/* Move the result in the double result register. */ \
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
@@ -1055,20 +1050,20 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void FlushPendingPushRegisters(MacroAssembler* masm,
+void FlushPendingPushRegisters(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes) {
switch (pending_pushes->size()) {
case 0:
break;
case 1:
- masm->Push((*pending_pushes)[0]);
+ tasm->Push((*pending_pushes)[0]);
break;
case 2:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
break;
case 3:
- masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+ tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
(*pending_pushes)[2]);
break;
default:
@@ -1079,17 +1074,17 @@ void FlushPendingPushRegisters(MacroAssembler* masm,
pending_pushes->resize(0);
}
-void AddPendingPushRegister(MacroAssembler* masm,
+void AddPendingPushRegister(TurboAssembler* tasm,
FrameAccessState* frame_access_state,
ZoneVector<Register>* pending_pushes,
Register reg) {
pending_pushes->push_back(reg);
if (pending_pushes->size() == 3 || reg.is(ip)) {
- FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
+ FlushPendingPushRegisters(tasm, frame_access_state, pending_pushes);
}
}
void AdjustStackPointerForTailCall(
- MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
+ TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
ZoneVector<Register>* pending_pushes = nullptr,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -1097,15 +1092,15 @@ void AdjustStackPointerForTailCall(
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
if (pending_pushes != nullptr) {
- FlushPendingPushRegisters(masm, state, pending_pushes);
+ FlushPendingPushRegisters(tasm, state, pending_pushes);
}
- masm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
+ tasm->AddP(sp, sp, Operand(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -1128,20 +1123,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(
- masm(), frame_access_state(),
+ tasm(), frame_access_state(),
destination_location.index() - pending_pushes.size(),
&pending_pushes);
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ LoadP(ip, g.SlotToMemOperand(source_location.index()));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
source_location.GetRegister());
} else if (source.IsImmediate()) {
- AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
+ AddPendingPushRegister(tasm(), frame_access_state(), &pending_pushes,
ip);
} else {
// Pushes of non-scalar data types is not supported.
@@ -1149,15 +1144,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
}
move->Eliminate();
}
- FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
+ FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, nullptr, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -1180,8 +1175,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
} else {
- __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1201,9 +1195,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
// We cannot use the constant pool to load the target since
// we've already restored the caller's frame.
- ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
- RelocInfo::CODE_TARGET);
+ ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1315,7 +1308,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToIDelayed(zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
@@ -1781,8 +1775,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Move(d1, d3);
break;
}
@@ -2445,7 +2439,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
FlagsCondition condition = branch->condition;
Condition cond = FlagsConditionToCondition(condition, op);
- if (op == kS390_CmpDouble) {
+ if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
// check for unordered if necessary
// Branching to flabel/tlabel according to what's expected by tests
if (cond == le || cond == eq || cond == lt) {
@@ -2496,14 +2490,14 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We use the context register as the scratch register, because we do
// not have a context here.
__ PrepareCallCFunction(0, 0, cp);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
@@ -2526,14 +2520,12 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
ArchOpcode op = instr->arch_opcode();
Condition cond = FlagsConditionToCondition(condition, op);
- if (op == kS390_CmpDouble) {
+ if (op == kS390_CmpFloat || op == kS390_CmpDouble) {
// check for unordered if necessary
- if (cond == le) {
+ if (cond == le || cond == eq || cond == lt) {
__ bunordered(&end);
- // Unnecessary for eq/lt since only FU bit will be set.
- } else if (cond == gt) {
+ } else if (cond == gt || cond == ne || cond == ge) {
__ bunordered(tlabel);
- // Unnecessary for ne/ge since only FU bit will be set.
}
}
__ b(cond, tlabel);
@@ -2608,12 +2600,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
// TODO(turbofan): We should be able to generate better code by sharing the
// actual final call site and just bl'ing to it here, similar to what we do
// in the lithium backend.
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2674,7 +2666,7 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
+ shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
}
const RegList double_saves = descriptor->CalleeSavedFPRegisters();
@@ -2796,12 +2788,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
#endif // V8_TARGET_ARCH_S390X
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
@@ -2827,8 +2817,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ double value = (src.type() == Constant::kFloat32)
+ ? src.ToFloat32()
+ : src.ToFloat64().value();
if (src.type() == Constant::kFloat32) {
__ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
} else {
@@ -2962,7 +2953,7 @@ void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
- int current_pc = masm()->pc_offset();
+ int current_pc = tasm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
DCHECK_EQ(0, padding_size % 2);
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 352e63af07..350f84b4bd 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -174,7 +174,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index f4e8ea13d2..e839d8cb1c 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -307,9 +307,6 @@ ArchOpcode SelectLoadOpcode(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
default:
UNREACHABLE();
@@ -820,9 +817,6 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -887,9 +881,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -937,9 +928,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kWord64: // Fall through.
#endif
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1322,7 +1310,7 @@ bool TryMatchShiftFromMul(InstructionSelector* selector, Node* node) {
Node* left = m.left().node();
Node* right = m.right().node();
if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
- base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
+ base::bits::IsPowerOfTwo(g.GetImmediate(right))) {
int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
bool doZeroExt = DoZeroExtForResult(node);
bool canEliminateZeroExt = ProduceWord32Result(left);
@@ -1720,7 +1708,6 @@ static bool CompareLogical(FlagsContinuation* cont) {
return false;
}
UNREACHABLE();
- return false;
}
namespace {
@@ -2220,6 +2207,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2227,7 +2215,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = value_operand;
if (sw.min_value) {
index_operand = g.TempRegister();
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 3660553041..59c684f9bd 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -139,7 +139,6 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
return os << "throw";
}
UNREACHABLE();
- return os;
}
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 76889a69cb..ed74489149 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -940,10 +940,8 @@ class SpecialRPONumberer : public ZoneObject {
size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
- BitVector* new_members = new (zone_)
- BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
- new_members->CopyFrom(*loop.members);
- loop.members = new_members;
+ loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
+ zone_);
}
// Extend loop information vector.
diff --git a/deps/v8/src/compiler/select-lowering.h b/deps/v8/src/compiler/select-lowering.h
index b882a3125f..b66f69f986 100644
--- a/deps/v8/src/compiler/select-lowering.h
+++ b/deps/v8/src/compiler/select-lowering.h
@@ -22,6 +22,8 @@ class SelectLowering final : public Reducer {
SelectLowering(Graph* graph, CommonOperatorBuilder* common);
~SelectLowering();
+ const char* reducer_name() const override { return "SelectLowering"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 6cf88d33cf..1604f020e6 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -9,6 +9,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/objects-inl.h"
#include "src/wasm/wasm-module.h"
@@ -93,6 +94,16 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4LtS) \
+ V(I32x4LeS) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4LtU) \
+ V(I32x4LeU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
@@ -112,7 +123,7 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Min) \
V(F32x4Max)
-#define FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(V) \
+#define FOREACH_FLOAT32X4_TO_INT32X4OPCODE(V) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
@@ -120,18 +131,6 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Gt) \
V(F32x4Ge)
-#define FOREACH_INT32X4_TO_SIMD1X4OPCODE(V) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4LtS) \
- V(I32x4LeS) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4LtU) \
- V(I32x4LeU) \
- V(I32x4GtU) \
- V(I32x4GeU)
-
#define FOREACH_INT16X8_OPCODE(V) \
V(I16x8Splat) \
V(I16x8ExtractLane) \
@@ -150,7 +149,13 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
- V(I16x8MaxU)
+ V(I16x8MaxU) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8LtU) \
+ V(I16x8LeU)
#define FOREACH_INT8X16_OPCODE(V) \
V(I8x16Splat) \
@@ -170,35 +175,27 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16MinU) \
- V(I8x16MaxU)
-
-#define FOREACH_INT16X8_TO_SIMD1X8OPCODE(V) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8LtS) \
- V(I16x8LeS) \
- V(I16x8LtU) \
- V(I16x8LeU)
-
-#define FOREACH_INT8X16_TO_SIMD1X16OPCODE(V) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16LtS) \
- V(I8x16LeS) \
- V(I8x16LtU) \
+ V(I8x16MaxU) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16LtU) \
V(I8x16LeU)
-#define FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(V) \
- V(Float32x4, Float32) \
- V(Int32x4, Int32) \
- V(Int16x8, Int16) \
- V(Int8x16, Int8)
-
-#define FOREACH_SIMD_TYPE_TO_MACHINE_REP(V) \
- V(Float32x4, Float32) \
- V(Int32x4, Word32) \
- V(Int16x8, Word16) \
- V(Int8x16, Word8)
+MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
+ switch (simdType) {
+ case SimdType::kFloat32x4:
+ return MachineType::Float32();
+ case SimdType::kInt32x4:
+ return MachineType::Int32();
+ case SimdType::kInt16x8:
+ return MachineType::Int16();
+ case SimdType::kInt8x16:
+ return MachineType::Int8();
+ }
+ return MachineType::None();
+}
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
@@ -214,55 +211,33 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
- FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
- FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x4;
+ FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_INT16X8_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
}
- FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x8;
- break;
- }
FOREACH_INT8X16_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt8x16;
break;
}
- FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kSimd1x16;
- break;
- }
default: {
switch (output->opcode()) {
- FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kF32x4SConvertI32x4:
case IrOpcode::kF32x4UConvertI32x4: {
replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
- FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
+ FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
- case IrOpcode::kS32x4Select: {
- replacements_[node->id()].type = SimdType::kSimd1x4;
- break;
- }
- FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- }
- FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt8x16;
- break;
- }
- case IrOpcode::kS16x8Select: {
- replacements_[node->id()].type = SimdType::kSimd1x8;
+ case IrOpcode::kS128Select: {
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
default: {
@@ -310,12 +285,11 @@ static int GetReturnCountAfterLowering(
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
- if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4 ||
- type == SimdType::kSimd1x4) {
+ if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
num_lanes = kNumLanes32;
- } else if (type == SimdType::kInt16x8 || type == SimdType::kSimd1x8) {
+ } else if (type == SimdType::kInt16x8) {
num_lanes = kNumLanes16;
- } else if (type == SimdType::kInt8x16 || type == SimdType::kSimd1x16) {
+ } else if (type == SimdType::kInt8x16) {
num_lanes = kNumLanes8;
} else {
UNREACHABLE();
@@ -415,18 +389,42 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
}
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op, bool invert_inputs) {
+ const Operator* op) {
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
int num_lanes = NumLanes(input_rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
+ const Operator* op,
+ bool invert_inputs) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* cmp_result = nullptr;
if (invert_inputs) {
- rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
+ cmp_result = graph()->NewNode(op, rep_right[i], rep_left[i]);
} else {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ cmp_result = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
+ Diamond d_cmp(graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), cmp_result,
+ jsgraph_->Int32Constant(0)));
+ MachineRepresentation rep =
+ (input_rep_type == SimdType::kFloat32x4)
+ ? MachineRepresentation::kWord32
+ : MachineTypeFrom(input_rep_type).representation();
+ rep_node[i] =
+ d_cmp.Phi(rep, jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(-1));
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -682,8 +680,12 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
- rep_node[i] = d.Phi(MachineRepresentation::kWord32,
- jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
+ MachineRepresentation rep =
+ (input_rep_type == SimdType::kFloat32x4)
+ ? MachineRepresentation::kWord32
+ : MachineTypeFrom(input_rep_type).representation();
+ rep_node[i] =
+ d.Phi(rep, jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(-1));
}
ReplaceNode(node, rep_node, num_lanes);
}
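The two hunks above change lane-wise comparisons to materialize SIMD-style
masks. A minimal standalone C++ sketch (not V8 code) of the per-lane value the
Diamond/Phi now produces: 0 when the relation fails, -1 (all bits set) when it
holds, replacing the 0/1 booleans emitted before this patch.

#include <cstdint>

// Per-lane result corresponding to one iteration of the lowering loop above.
int32_t LaneGreaterThan(int32_t a, int32_t b) { return a > b ? -1 : 0; }

int main() {
  return (LaneGreaterThan(3, 2) == -1 && LaneGreaterThan(1, 2) == 0) ? 0 : 1;
}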
@@ -737,17 +739,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
-#define LOAD_CASE(sType, mType) \
- case SimdType::k##sType: \
- load_op = machine()->Load(MachineType::mType()); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE)
- default:
- UNREACHABLE();
- }
-#undef LOAD_CASE
+ load_op = machine()->Load(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
@@ -755,17 +747,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
UnalignedLoadRepresentationOf(node->op()).representation();
const Operator* load_op;
-#define UNALIGNED_LOAD_CASE(sType, mType) \
- case SimdType::k##sType: \
- load_op = machine()->UnalignedLoad(MachineType::mType()); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(UNALIGNED_LOAD_CASE)
- default:
- UNREACHABLE();
- }
-#undef UNALIGHNED_LOAD_CASE
+ load_op = machine()->UnalignedLoad(MachineTypeFrom(rep_type));
LowerLoadOp(rep, node, load_op, rep_type);
break;
}
@@ -775,35 +757,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
-#define STORE_CASE(sType, mType) \
- case SimdType::k##sType: \
- store_op = machine()->Store(StoreRepresentation( \
- MachineRepresentation::k##mType, write_barrier_kind)); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(STORE_CASE)
- default:
- UNREACHABLE();
- }
-#undef STORE_CASE
+ store_op = machine()->Store(StoreRepresentation(
+ MachineTypeFrom(rep_type).representation(), write_barrier_kind));
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
-#define UNALIGNED_STORE_CASE(sType, mType) \
- case SimdType::k##sType: \
- store_op = machine()->UnalignedStore(MachineRepresentation::k##mType); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(UNALIGNED_STORE_CASE)
- default:
- UNREACHABLE();
- }
-#undef UNALIGNED_STORE_CASE
+ store_op =
+ machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
@@ -816,7 +779,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kCall: {
- // TODO(turbofan): Make WASM code const-correct wrt. CallDescriptor.
+ // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
if (DefaultLowering(node) ||
@@ -824,8 +787,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
descriptor->GetReturnType(0) == MachineType::Simd128())) {
// We have to adjust the call descriptor.
const Operator* op =
- common()->Call(wasm::ModuleEnv::GetI32WasmCallDescriptorForSimd(
- zone(), descriptor));
+ common()->Call(GetI32WasmCallDescriptorForSimd(zone(), descriptor));
NodeProperties::ChangeOp(node, op);
}
if (descriptor->ReturnCount() == 1 &&
@@ -1050,10 +1012,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
-#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
- case IrOpcode::simd_op: { \
- LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
- break; \
+#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
+ case IrOpcode::simd_op: { \
+ LowerCompareOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
+ break; \
}
COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
@@ -1104,13 +1066,11 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerNotEqual(node, SimdType::kInt8x16, machine()->Word32Equal());
break;
}
- case IrOpcode::kS32x4Select:
- case IrOpcode::kS16x8Select:
- case IrOpcode::kS8x16Select: {
+ case IrOpcode::kS128Select: {
DCHECK(node->InputCount() == 3);
- DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4 ||
- ReplacementType(node->InputAt(0)) == SimdType::kSimd1x8 ||
- ReplacementType(node->InputAt(0)) == SimdType::kSimd1x16);
+ DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kInt32x4 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kInt16x8 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kInt8x16);
Node** boolean_input = GetReplacements(node->InputAt(0));
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
@@ -1119,18 +1079,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
Diamond d(graph(), common(),
graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
jsgraph_->Int32Constant(0)));
-#define SELECT_CASE(sType, mType) \
- case SimdType::k##sType: \
- rep_node[i] = \
- d.Phi(MachineRepresentation::k##mType, rep_right[1], rep_left[0]); \
- break;
-
- switch (rep_type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(SELECT_CASE)
- default:
- UNREACHABLE();
- }
-#undef SELECT_CASE
+ rep_node[i] = d.Phi(MachineTypeFrom(rep_type).representation(),
+ rep_right[1], rep_left[0]);
}
ReplaceNode(node, rep_node, num_lanes);
break;
@@ -1264,19 +1214,9 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
}
Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
for (int i = 0; i < num_lanes; ++i) {
-#define PHI_CASE(sType, mType) \
- case SimdType::k##sType: \
- rep_nodes[i] = graph()->NewNode( \
- common()->Phi(MachineRepresentation::k##mType, value_count), \
- value_count + 1, inputs_rep[i], false); \
- break;
-
- switch (type) {
- FOREACH_SIMD_TYPE_TO_MACHINE_REP(PHI_CASE)
- default:
- UNREACHABLE();
- }
-#undef PHI_CASE
+ rep_nodes[i] = graph()->NewNode(
+ common()->Phi(MachineTypeFrom(type).representation(), value_count),
+ value_count + 1, inputs_rep[i], false);
}
ReplaceNode(phi, rep_nodes, num_lanes);
}
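
// A minimal sketch of the LowerCompareOp() helper that the COMPARISON_CASE
// hunk above now calls instead of LowerBinaryOp(): the same per-lane
// expansion, but with an invert_inputs flag so that e.g. a "greater than"
// opcode can reuse the machine "less than" operator with swapped operands.
// Assumed shape only -- NumLanes() is taken to be the existing lane-count
// helper, and the committed body may additionally normalize the comparison
// result into a per-lane mask.
void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
                                        const Operator* op,
                                        bool invert_inputs) {
  DCHECK_EQ(2, node->InputCount());
  Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
  Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
  int num_lanes = NumLanes(input_rep_type);
  Node** rep_node = zone()->NewArray<Node*>(num_lanes);
  for (int i = 0; i < num_lanes; ++i) {
    // Swap the operands when the opcode is expressed through its inverse.
    Node* lhs = invert_inputs ? rep_right[i] : rep_left[i];
    Node* rhs = invert_inputs ? rep_left[i] : rep_right[i];
    rep_node[i] = graph()->NewNode(op, lhs, rhs);
  }
  ReplaceNode(node, rep_node, num_lanes);
}
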
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 09c78dc983..f7f276cd5e 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -28,15 +28,7 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t {
- kFloat32x4,
- kInt32x4,
- kInt16x8,
- kInt8x16,
- kSimd1x4,
- kSimd1x8,
- kSimd1x16
- };
+ enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };
#if defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -81,8 +73,9 @@ class SimdScalarLowering {
const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
- void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
- bool invert_inputs = false);
+ void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op);
+ void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
+ bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
const Operator* op);
@@ -96,6 +89,7 @@ class SimdScalarLowering {
void LowerShiftOp(Node* node, SimdType type);
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
+ MachineType MachineTypeFrom(SimdType simdType);
JSGraph* const jsgraph_;
NodeMarker<State> state_;
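
// A minimal sketch of the new MachineTypeFrom() helper declared above; it is
// assumed to be a plain mapping from the (now four-valued) SimdType to the
// matching MachineType, replacing the FOREACH_SIMD_TYPE_TO_MACHINE_TYPE /
// _REP macro switches that the .cc hunks delete.
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
  switch (simdType) {
    case SimdType::kFloat32x4:
      return MachineType::Float32();
    case SimdType::kInt32x4:
      return MachineType::Int32();
    case SimdType::kInt16x8:
      return MachineType::Int16();
    case SimdType::kInt8x16:
      return MachineType::Int8();
  }
  UNREACHABLE();
}
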
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 33fe9095ce..19578fc7ac 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -84,7 +84,6 @@ MachineRepresentation MachineRepresentationFromArrayType(
return MachineRepresentation::kFloat64;
}
UNREACHABLE();
- return MachineRepresentation::kNone;
}
UseInfo CheckedUseInfoAsWord32FromHint(
@@ -101,7 +100,6 @@ UseInfo CheckedUseInfoAsWord32FromHint(
return UseInfo::CheckedNumberOrOddballAsWord32();
}
UNREACHABLE();
- return UseInfo::None();
}
UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
@@ -117,7 +115,6 @@ UseInfo CheckedUseInfoAsFloat64FromHint(NumberOperationHint hint) {
return UseInfo::CheckedNumberOrOddballAsFloat64();
}
UNREACHABLE();
- return UseInfo::None();
}
UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
@@ -139,14 +136,10 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kBit:
return UseInfo::Bool();
case MachineRepresentation::kSimd128:
- case MachineRepresentation::kSimd1x4:
- case MachineRepresentation::kSimd1x8:
- case MachineRepresentation::kSimd1x16:
case MachineRepresentation::kNone:
break;
}
UNREACHABLE();
- return UseInfo::None();
}
@@ -999,6 +992,18 @@ class RepresentationSelector {
}
}
+ void VisitCheck(Node* node, Type* type, SimplifiedLowering* lowering) {
+ if (InputIs(node, type)) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
+
void VisitCall(Node* node, SimplifiedLowering* lowering) {
const CallDescriptor* desc = CallDescriptorOf(node->op());
int params = static_cast<int>(desc->ParameterCount());
@@ -1564,8 +1569,7 @@ class RepresentationSelector {
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
} else {
- DCHECK_EQ(MachineRepresentation::kNone,
- input_info->representation());
+ DCHECK(!TypeOf(node->InputAt(0))->IsInhabited());
DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else {
@@ -2332,9 +2336,18 @@ class RepresentationSelector {
return;
}
case IrOpcode::kStringCharCodeAt: {
- // TODO(turbofan): Allow builtins to return untagged values.
- VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kTaggedSigned);
+ Type* string_type = TypeOf(node->InputAt(0));
+ if (string_type->Is(Type::SeqString())) {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kWord32);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, simplified()->SeqStringCharCodeAt());
+ }
+ } else {
+ // TODO(turbofan): Allow builtins to return untagged values.
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ }
return;
}
case IrOpcode::kStringFromCharCode: {
@@ -2354,7 +2367,12 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedSigned);
return;
}
-
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl: {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kCheckBounds: {
Type* index_type = TypeOf(node->InputAt(0));
Type* length_type = TypeOf(node->InputAt(1));
@@ -2397,14 +2415,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckInternalizedString: {
- if (InputIs(node, Type::InternalizedString())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::InternalizedString(), lowering);
return;
}
case IrOpcode::kCheckNumber: {
@@ -2417,14 +2428,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckReceiver: {
- if (InputIs(node, Type::Receiver())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::Receiver(), lowering);
return;
}
case IrOpcode::kCheckSmi: {
@@ -2440,17 +2444,21 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckString: {
- if (InputIs(node, Type::String())) {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else {
- VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
- MachineRepresentation::kTaggedPointer);
- }
+ VisitCheck(node, Type::String(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckSeqString: {
+ VisitCheck(node, Type::SeqString(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckNonEmptyString: {
+ VisitCheck(node, Type::NonEmptyString(), lowering);
+ return;
+ }
+ case IrOpcode::kCheckSymbol: {
+ VisitCheck(node, Type::Symbol(), lowering);
return;
}
-
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
@@ -2467,14 +2475,24 @@ class RepresentationSelector {
}
case IrOpcode::kStoreField: {
FieldAccess access = FieldAccessOf(node->op());
- NodeInfo* input_info = GetInfo(node->InputAt(1));
+ Node* value_node = node->InputAt(1);
+ NodeInfo* input_info = GetInfo(value_node);
+ MachineRepresentation field_representation =
+ access.machine_type.representation();
+
+ // Make sure we convert to Smi if possible. This should help write
+ // barrier elimination.
+ if (field_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ field_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
- access.base_is_tagged, access.machine_type.representation(),
- access.offset, access.type, input_info->representation(),
- node->InputAt(1));
+ access.base_is_tagged, field_representation, access.offset,
+ access.type, input_info->representation(), value_node);
+
ProcessInput(node, 0, UseInfoForBasePointer(access));
- ProcessInput(node, 1, TruncatingUseInfoFromRepresentation(
- access.machine_type.representation()));
+ ProcessInput(node, 1,
+ TruncatingUseInfoFromRepresentation(field_representation));
ProcessRemainingInputs(node, 2);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
@@ -2543,15 +2561,25 @@ class RepresentationSelector {
}
case IrOpcode::kStoreElement: {
ElementAccess access = ElementAccessOf(node->op());
- NodeInfo* input_info = GetInfo(node->InputAt(2));
+ Node* value_node = node->InputAt(2);
+ NodeInfo* input_info = GetInfo(value_node);
+ MachineRepresentation element_representation =
+ access.machine_type.representation();
+
+ // Make sure we convert to Smi if possible. This should help write
+ // barrier elimination.
+ if (element_representation == MachineRepresentation::kTagged &&
+ TypeOf(value_node)->Is(Type::SignedSmall())) {
+ element_representation = MachineRepresentation::kTaggedSigned;
+ }
WriteBarrierKind write_barrier_kind = WriteBarrierKindFor(
- access.base_is_tagged, access.machine_type.representation(),
- access.type, input_info->representation(), node->InputAt(2));
+ access.base_is_tagged, element_representation, access.type,
+ input_info->representation(), value_node);
ProcessInput(node, 0, UseInfoForBasePointer(access)); // base
ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
ProcessInput(node, 2,
TruncatingUseInfoFromRepresentation(
- access.machine_type.representation())); // value
+ element_representation)); // value
ProcessRemainingInputs(node, 3);
SetOutput(node, MachineRepresentation::kNone);
if (lower()) {
@@ -2563,6 +2591,14 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kTransitionAndStoreElement: {
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // array
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // value
+ ProcessRemainingInputs(node, 3);
+ SetOutput(node, MachineRepresentation::kNone);
+ return;
+ }
case IrOpcode::kLoadTypedElement: {
MachineRepresentation const rep =
MachineRepresentationFromArrayType(ExternalArrayTypeOf(node->op()));
@@ -2752,7 +2788,7 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kCheckTaggedHole: {
+ case IrOpcode::kCheckNotTaggedHole: {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
return;
}
@@ -2839,6 +2875,16 @@ class RepresentationSelector {
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
+ case IrOpcode::kLookupHashStorageIndex:
+ VisitInputs(node);
+ return SetOutput(node, MachineRepresentation::kTaggedSigned);
+
+ case IrOpcode::kLoadHashMapValue:
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // table
+ ProcessInput(node, 1, UseInfo::TruncatingWord32()); // index
+ ProcessRemainingInputs(node, 2);
+ return SetOutput(node, MachineRepresentation::kTagged);
+
// Operators with all inputs tagged and no or tagged output have uniform
// handling.
case IrOpcode::kEnd:
@@ -2873,6 +2919,7 @@ class RepresentationSelector {
case IrOpcode::kJSToName:
case IrOpcode::kJSToObject:
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
VisitInputs(node);
// Assume the output is tagged.
return SetOutput(node, MachineRepresentation::kTagged);
@@ -3596,7 +3643,8 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
void SimplifiedLowering::DoStringToNumber(Node* node) {
Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::StringToNumber(isolate());
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
@@ -3702,7 +3750,7 @@ void SimplifiedLowering::DoUnsigned32ToUint8Clamped(Node* node) {
Node* SimplifiedLowering::ToNumberCode() {
if (!to_number_code_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
to_number_code_.set(jsgraph()->HeapConstant(callable.code()));
}
return to_number_code_.get();
@@ -3710,7 +3758,7 @@ Node* SimplifiedLowering::ToNumberCode() {
Operator const* SimplifiedLowering::ToNumberOperator() {
if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
+ Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags,
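
// Sketch of the Smi-narrowing idea used in the StoreField/StoreElement hunks
// above: when the stored value is statically SignedSmall, a kTagged slot can
// be treated as kTaggedSigned, and Smi stores need no write barrier because
// Smis are not heap pointers. Standalone illustration only -- the real
// WriteBarrierKindFor() also inspects base taggedness, field type, offset and
// the input representation.
enum class Rep { kTagged, kTaggedSigned, kTaggedPointer };
enum class Barrier { kNoWriteBarrier, kFullWriteBarrier };

Barrier SketchWriteBarrierFor(Rep field_rep, bool value_is_signed_small) {
  if (field_rep == Rep::kTagged && value_is_signed_small) {
    field_rep = Rep::kTaggedSigned;  // the narrowing performed by the patch
  }
  return field_rep == Rep::kTaggedSigned ? Barrier::kNoWriteBarrier
                                         : Barrier::kFullWriteBarrier;
}
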
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 266cb236ba..39c467d1bc 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -29,6 +29,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorReducer final
SimplifiedOperatorReducer(Editor* editor, JSGraph* jsgraph);
~SimplifiedOperatorReducer() final;
+ const char* reducer_name() const override {
+ return "SimplifiedOperatorReducer";
+ }
+
Reduction Reduce(Node* node) final;
private:
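
// Sketch of the kind of consumer the new reducer_name() hook enables
// (TypedOptimization, Typer and ValueNumberingReducer gain the same override
// further below): a reduction pass can report which reducer changed a node.
// ReducerLike and TraceReduction are hypothetical illustrations, not V8 APIs.
#include <cstdio>

struct ReducerLike {
  virtual const char* reducer_name() const = 0;
  virtual ~ReducerLike() = default;
};

void TraceReduction(const ReducerLike& reducer, int node_id, bool changed) {
  if (changed) {
    std::printf("- %s reduced node #%d\n", reducer.reducer_name(), node_id);
  }
}
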
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 476f423749..29e466925f 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -9,6 +9,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
#include "src/objects/map.h"
+#include "src/objects/name.h"
namespace v8 {
namespace internal {
@@ -26,7 +27,6 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
return os << "tagged base";
}
UNREACHABLE();
- return os;
}
@@ -51,7 +51,6 @@ MachineType BufferAccess::machine_type() const {
return MachineType::Float64();
}
UNREACHABLE();
- return MachineType::None();
}
@@ -77,7 +76,6 @@ std::ostream& operator<<(std::ostream& os, BufferAccess access) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return os;
}
@@ -205,7 +203,6 @@ std::ostream& operator<<(std::ostream& os, CheckFloat64HoleMode mode) {
return os << "never-return-hole";
}
UNREACHABLE();
- return os;
}
CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
@@ -233,7 +230,6 @@ std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
return os << "dont-check-for-minus-zero";
}
UNREACHABLE();
- return os;
}
std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
@@ -286,11 +282,11 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
return os << "NumberOrOddball";
}
UNREACHABLE();
- return os;
}
CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kCheckedTaggedToFloat64, op->opcode());
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64 ||
+ op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
return OpParameter<CheckTaggedInputMode>(op);
}
@@ -345,7 +341,6 @@ std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
<< " to " << Brief(*transition.target());
}
UNREACHABLE();
- return os;
}
ElementsTransition const& ElementsTransitionOf(const Operator* op) {
@@ -353,6 +348,55 @@ ElementsTransition const& ElementsTransitionOf(const Operator* op) {
return OpParameter<ElementsTransition>(op);
}
+namespace {
+
+// Parameters for the TransitionAndStoreElement opcode.
+class TransitionAndStoreElementParameters final {
+ public:
+ TransitionAndStoreElementParameters(Handle<Map> double_map,
+ Handle<Map> fast_map);
+
+ Handle<Map> double_map() const { return double_map_; }
+ Handle<Map> fast_map() const { return fast_map_; }
+
+ private:
+ Handle<Map> const double_map_;
+ Handle<Map> const fast_map_;
+};
+
+TransitionAndStoreElementParameters::TransitionAndStoreElementParameters(
+ Handle<Map> double_map, Handle<Map> fast_map)
+ : double_map_(double_map), fast_map_(fast_map) {}
+
+bool operator==(TransitionAndStoreElementParameters const& lhs,
+ TransitionAndStoreElementParameters const& rhs) {
+ return lhs.fast_map().address() == rhs.fast_map().address() &&
+ lhs.double_map().address() == rhs.double_map().address();
+}
+
+size_t hash_value(TransitionAndStoreElementParameters parameters) {
+ return base::hash_combine(parameters.fast_map().address(),
+ parameters.double_map().address());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ TransitionAndStoreElementParameters parameters) {
+ return os << "fast-map" << Brief(*parameters.fast_map()) << " double-map"
+ << Brief(*parameters.double_map());
+}
+
+} // namespace
+
+Handle<Map> DoubleMapParameterOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ return OpParameter<TransitionAndStoreElementParameters>(op).double_map();
+}
+
+Handle<Map> FastMapParameterOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kTransitionAndStoreElement);
+ return OpParameter<TransitionAndStoreElementParameters>(op).fast_map();
+}
+
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
@@ -365,7 +409,6 @@ std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
return os << "NumberOrOddball";
}
UNREACHABLE();
- return os;
}
size_t hash_value(NumberOperationHint hint) {
@@ -480,8 +523,11 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
@@ -521,29 +567,31 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(SpeculativeNumberLessThan) \
V(SpeculativeNumberLessThanOrEqual)
-#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
- V(CheckHeapObject, 1, 1) \
- V(CheckIf, 1, 0) \
- V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
- V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
- V(CheckTaggedHole, 1, 1) \
- V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
- V(CheckedInt32Div, 2, 1) \
- V(CheckedInt32Mod, 2, 1) \
- V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
- V(CheckedInt32ToTaggedSigned, 1, 1) \
- V(CheckedTaggedSignedToInt32, 1, 1) \
- V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedTaggedToTaggedPointer, 1, 1) \
- V(CheckedTruncateTaggedToWord32, 1, 1)
+#define CHECKED_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckHeapObject, 1, 1) \
+ V(CheckIf, 1, 0) \
+ V(CheckInternalizedString, 1, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckReceiver, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
+ V(CheckSeqString, 1, 1) \
+ V(CheckNonEmptyString, 1, 1) \
+ V(CheckSymbol, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
+ V(CheckedInt32Add, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
+ V(CheckedInt32Div, 2, 1) \
+ V(CheckedInt32Mod, 2, 1) \
+ V(CheckedUint32Div, 2, 1) \
+ V(CheckedUint32Mod, 2, 1) \
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedInt32ToTaggedSigned, 1, 1) \
+ V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedSigned, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -586,6 +634,20 @@ struct SimplifiedOperatorGlobalCache final {
};
ArrayBufferWasNeuteredOperator kArrayBufferWasNeutered;
+ struct LookupHashStorageIndexOperator final : public Operator {
+ LookupHashStorageIndexOperator()
+ : Operator(IrOpcode::kLookupHashStorageIndex, Operator::kEliminatable,
+ "LookupHashStorageIndex", 2, 1, 1, 1, 1, 0) {}
+ };
+ LookupHashStorageIndexOperator kLookupHashStorageIndex;
+
+ struct LoadHashMapValueOperator final : public Operator {
+ LoadHashMapValueOperator()
+ : Operator(IrOpcode::kLoadHashMapValue, Operator::kEliminatable,
+ "LoadHashMapValue", 2, 1, 1, 1, 1, 0) {}
+ };
+ LoadHashMapValueOperator kLoadHashMapValue;
+
struct ArgumentsFrameOperator final : public Operator {
ArgumentsFrameOperator()
: Operator(IrOpcode::kArgumentsFrame, Operator::kPure, "ArgumentsFrame",
@@ -670,6 +732,20 @@ struct SimplifiedOperatorGlobalCache final {
CheckedTaggedToFloat64Operator<CheckTaggedInputMode::kNumberOrOddball>
kCheckedTaggedToFloat64NumberOrOddballOperator;
+ template <CheckTaggedInputMode kMode>
+ struct CheckedTruncateTaggedToWord32Operator final
+ : public Operator1<CheckTaggedInputMode> {
+ CheckedTruncateTaggedToWord32Operator()
+ : Operator1<CheckTaggedInputMode>(
+ IrOpcode::kCheckedTruncateTaggedToWord32,
+ Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, kMode) {}
+ };
+ CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
+ kCheckedTruncateTaggedToWord32NumberOperator;
+ CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumberOrOddball>
+ kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+
template <CheckFloat64HoleMode kMode>
struct CheckFloat64HoleNaNOperator final
: public Operator1<CheckFloat64HoleMode> {
@@ -768,6 +844,8 @@ PURE_OP_LIST(GET_FROM_CACHE)
CHECKED_OP_LIST(GET_FROM_CACHE)
GET_FROM_CACHE(ArrayBufferWasNeutered)
GET_FROM_CACHE(ArgumentsFrame)
+GET_FROM_CACHE(LookupHashStorageIndex)
+GET_FROM_CACHE(LoadHashMapValue)
GET_FROM_CACHE(NewUnmappedArgumentsElements)
#undef GET_FROM_CACHE
@@ -780,7 +858,6 @@ const Operator* SimplifiedOperatorBuilder::ChangeFloat64ToTagged(
return &cache_.kChangeFloat64ToTaggedDontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
@@ -792,7 +869,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
return &cache_.kCheckedInt32MulDontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
@@ -804,7 +880,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
@@ -816,7 +891,6 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
@@ -828,7 +902,17 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
return &cache_.kCheckedTaggedToFloat64NumberOrOddballOperator;
}
UNREACHABLE();
- return nullptr;
+}
+
+const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
+ CheckTaggedInputMode mode) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ }
+ UNREACHABLE();
}
const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
@@ -851,7 +935,6 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
return &cache_.kCheckFloat64HoleNeverReturnHoleOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
@@ -867,7 +950,6 @@ const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
return &cache_.kSpeculativeToNumberNumberOrOddballOperator;
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
@@ -957,7 +1039,6 @@ const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
#undef LOAD_BUFFER
}
UNREACHABLE();
- return nullptr;
}
@@ -970,7 +1051,6 @@ const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
#undef STORE_BUFFER
}
UNREACHABLE();
- return nullptr;
}
const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
@@ -982,7 +1062,6 @@ const Operator* SimplifiedOperatorBuilder::StringFromCodePoint(
return &cache_.kStringFromCodePointOperatorUTF32;
}
UNREACHABLE();
- return nullptr;
}
#define SPECULATIVE_NUMBER_BINOP(Name) \
@@ -1023,6 +1102,15 @@ SPECULATIVE_NUMBER_BINOP_LIST(SPECULATIVE_NUMBER_BINOP)
ACCESS_OP_LIST(ACCESS)
#undef ACCESS
+const Operator* SimplifiedOperatorBuilder::TransitionAndStoreElement(
+ Handle<Map> double_map, Handle<Map> fast_map) {
+ TransitionAndStoreElementParameters parameters(double_map, fast_map);
+ return new (zone()) Operator1<TransitionAndStoreElementParameters>(
+ IrOpcode::kTransitionAndStoreElement,
+ Operator::kNoDeopt | Operator::kNoThrow, "TransitionAndStoreElement", 3,
+ 1, 1, 0, 1, 0, parameters);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
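
// Sketch of how a later lowering phase could consume the new
// TransitionAndStoreElement operator via the accessors added above.
// LowerTransitionAndStoreElement() is a hypothetical caller, not code from
// this patch, and the transition semantics described in the comment are an
// assumption.
void LowerTransitionAndStoreElement(Node* node) {
  DCHECK_EQ(IrOpcode::kTransitionAndStoreElement, node->opcode());
  Handle<Map> double_map = DoubleMapParameterOf(node->op());
  Handle<Map> fast_map = FastMapParameterOf(node->op());
  // Presumably: a heap-number value transitions the receiver to the
  // double-elements map, any other tagged value to the fast-elements map,
  // before the store itself is emitted.
  USE(double_map);
  USE(fast_map);
}
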
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index ac53bfc72e..f2739acef3 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -228,6 +228,10 @@ std::ostream& operator<<(std::ostream&, ElementsTransition);
ElementsTransition const& ElementsTransitionOf(const Operator* op)
WARN_UNUSED_RESULT;
+// Parameters for TransitionAndStoreElement.
+Handle<Map> DoubleMapParameterOf(const Operator* op);
+Handle<Map> FastMapParameterOf(const Operator* op);
+
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
kSignedSmall, // Inputs were always Smi so far, output was in Smi range.
@@ -378,9 +382,15 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringLessThanOrEqual();
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
+ const Operator* SeqStringCharCodeAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* StringToLowerCaseIntl();
+ const Operator* StringToUpperCaseIntl();
+
+ const Operator* LookupHashStorageIndex();
+ const Operator* LoadHashMapValue();
const Operator* SpeculativeToNumber(NumberOperationHint hint);
@@ -414,6 +424,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckNumber();
const Operator* CheckSmi();
const Operator* CheckString();
+ const Operator* CheckSeqString();
+ const Operator* CheckNonEmptyString();
+ const Operator* CheckSymbol();
const Operator* CheckReceiver();
const Operator* CheckedInt32Add();
@@ -432,10 +445,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
const Operator* CheckedTaggedToTaggedSigned();
const Operator* CheckedTaggedToTaggedPointer();
- const Operator* CheckedTruncateTaggedToWord32();
+ const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
- const Operator* CheckTaggedHole();
+ const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
const Operator* ObjectIsDetectableCallable();
@@ -484,6 +497,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// store-element [base + index], value
const Operator* StoreElement(ElementAccess const&);
+ // store-element [base + index], value, only with fast arrays.
+ const Operator* TransitionAndStoreElement(Handle<Map> double_map,
+ Handle<Map> fast_map);
+
// load-typed-element buffer, [base + external + index]
const Operator* LoadTypedElement(ExternalArrayType const&);
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 196cb0d608..71aa2110bb 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -329,7 +329,6 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
}
UNREACHABLE();
- return UnobservablesSet::Unvisited();
}
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
deleted file mode 100644
index 51299f8c66..0000000000
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/tail-call-optimization.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Reduction TailCallOptimization::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kReturn) return NoChange();
- // The value which is returned must be the result of a potential tail call,
- // there must be no try/catch/finally around the Call, and there must be no
- // other effect or control between the Call and the Return nodes.
- Node* const call = NodeProperties::GetValueInput(node, 1);
- if (call->opcode() == IrOpcode::kCall &&
- CallDescriptorOf(call->op())->SupportsTailCalls() &&
- NodeProperties::GetEffectInput(node) == call &&
- NodeProperties::GetControlInput(node) == call &&
- !NodeProperties::IsExceptionalCall(call) && call->UseCount() == 3) {
- // Ensure that no additional arguments are being popped other than those in
- // the CallDescriptor, otherwise the tail call transformation is invalid.
- DCHECK_EQ(0, Int32Matcher(NodeProperties::GetValueInput(node, 0)).Value());
- // Furthermore, the Return node value, effect, and control depends
- // directly on the Call, no other uses of the Call node exist.
- //
- // The input graph looks as follows:
-
- // Value1 ... ValueN Effect Control
- // ^ ^ ^ ^
- // | | | |
- // | +--+ +-+ |
- // +----------+ | | +------+
- // \ | | /
- // Call[Descriptor]
- // ^ ^ ^
- // Int32(0) <-+ | | |
- // \ | | |
- // Return
- // ^
- // |
-
- // The resulting graph looks like this:
-
- // Value1 ... ValueN Effect Control
- // ^ ^ ^ ^
- // | | | |
- // | +--+ +-+ |
- // +----------+ | | +------+
- // \ | | /
- // TailCall[Descriptor]
- // ^
- // |
-
- DCHECK_EQ(4, node->InputCount());
- node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
- node->ReplaceInput(1, NodeProperties::GetControlInput(call));
- node->RemoveInput(3);
- node->RemoveInput(2);
- for (int index = 0; index < call->op()->ValueInputCount(); ++index) {
- node->InsertInput(graph()->zone(), index,
- NodeProperties::GetValueInput(call, index));
- }
- NodeProperties::ChangeOp(node,
- common()->TailCall(CallDescriptorOf(call->op())));
- return Changed(node);
- }
- return NoChange();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/tail-call-optimization.h b/deps/v8/src/compiler/tail-call-optimization.h
deleted file mode 100644
index d693f3694c..0000000000
--- a/deps/v8/src/compiler/tail-call-optimization.h
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
-#define V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
-
-#include "src/compiler/graph-reducer.h"
-#include "src/globals.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class Graph;
-
-
-// Performs tail call optimization by replacing certain combinations of Return
-// and Call nodes with a single TailCall.
-class V8_EXPORT_PRIVATE TailCallOptimization final : public Reducer {
- public:
- TailCallOptimization(CommonOperatorBuilder* common, Graph* graph)
- : common_(common), graph_(graph) {}
-
- Reduction Reduce(Node* node) final;
-
- private:
- CommonOperatorBuilder* common() const { return common_; }
- Graph* graph() const { return graph_; }
-
- CommonOperatorBuilder* const common_;
- Graph* const graph_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_TAIL_CALL_OPTIMIZATION_H_
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 3d9801bc10..5ac9072174 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -7,6 +7,7 @@
#include "src/compiler/types.h"
#include "src/date.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index b95e22a2e5..8be08630b5 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -76,12 +76,18 @@ Reduction TypedOptimization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckHeapObject:
return ReduceCheckHeapObject(node);
+ case IrOpcode::kCheckNotTaggedHole:
+ return ReduceCheckNotTaggedHole(node);
case IrOpcode::kCheckMaps:
return ReduceCheckMaps(node);
case IrOpcode::kCheckNumber:
return ReduceCheckNumber(node);
case IrOpcode::kCheckString:
return ReduceCheckString(node);
+ case IrOpcode::kCheckSeqString:
+ return ReduceCheckSeqString(node);
+ case IrOpcode::kCheckNonEmptyString:
+ return ReduceCheckNonEmptyString(node);
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kNumberCeil:
@@ -128,6 +134,16 @@ Reduction TypedOptimization::ReduceCheckHeapObject(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckNotTaggedHole(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (!input_type->Maybe(Type::Hole())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceCheckMaps(Node* node) {
// The CheckMaps(o, ...map...) can be eliminated if map is stable,
// o has type Constant(object) and map == object->map, and either
@@ -174,6 +190,26 @@ Reduction TypedOptimization::ReduceCheckString(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceCheckSeqString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::SeqString())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
+Reduction TypedOptimization::ReduceCheckNonEmptyString(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(Type::NonEmptyString())) {
+ ReplaceWithValue(node, input);
+ return Replace(input);
+ }
+ return NoChange();
+}
+
Reduction TypedOptimization::ReduceLoadField(Node* node) {
Node* const object = NodeProperties::GetValueInput(node, 0);
Type* const object_type = NodeProperties::GetType(object);
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index c441daf222..cd4085c3fc 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -39,6 +39,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Flags flags, JSGraph* jsgraph);
~TypedOptimization();
+ const char* reducer_name() const override { return "TypedOptimization"; }
+
Reduction Reduce(Node* node) final;
private:
@@ -46,6 +48,8 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckNumber(Node* node);
Reduction ReduceCheckString(Node* node);
+ Reduction ReduceCheckSeqString(Node* node);
+ Reduction ReduceCheckNonEmptyString(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
@@ -54,6 +58,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceSpeculativeToNumber(Node* node);
+ Reduction ReduceCheckNotTaggedHole(Node* node);
CompilationDependencies* dependencies() const { return dependencies_; }
Factory* factory() const;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index f92d507dfb..94b6e5a922 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -43,7 +43,7 @@ Typer::Typer(Isolate* isolate, Flags flags, Graph* graph)
Zone* zone = this->zone();
Factory* const factory = isolate->factory();
- singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
+ singleton_empty_string_ = Type::NewConstant(factory->empty_string(), zone);
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
falsish_ = Type::Union(
@@ -73,6 +73,8 @@ class Typer::Visitor : public Reducer {
induction_vars_(induction_vars),
weakened_nodes_(typer->zone()) {}
+ const char* reducer_name() const override { return "Typer"; }
+
Reduction Reduce(Node* node) override {
if (node->op()->ValueOutputCount() == 0) return NoChange();
switch (node->opcode()) {
@@ -207,7 +209,6 @@ class Typer::Visitor : public Reducer {
break;
}
UNREACHABLE();
- return nullptr;
}
Type* TypeConstant(Handle<Object> value);
@@ -271,6 +272,7 @@ class Typer::Visitor : public Reducer {
static Type* ToNumber(Type*, Typer*);
static Type* ToObject(Type*, Typer*);
static Type* ToString(Type*, Typer*);
+ static Type* ToPrimitiveToString(Type*, Typer*);
#define DECLARE_METHOD(Name) \
static Type* Name(Type* type, Typer* t) { \
return t->operation_typer_.Name(type); \
@@ -505,6 +507,15 @@ Type* Typer::Visitor::ToString(Type* type, Typer* t) {
return Type::String();
}
+// static
+Type* Typer::Visitor::ToPrimitiveToString(Type* type, Typer* t) {
+ // ES6 section 7.1.1 ToPrimitive( argument, "default" ) followed by
+ // ES6 section 7.1.12 ToString ( argument )
+ type = ToPrimitive(type, t);
+ if (type->Is(Type::String())) return type;
+ return Type::String();
+}
+
// Type checks.
Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
@@ -609,37 +620,30 @@ Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeRetain(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeInt32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeInt64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeRelocatableInt64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeNumberConstant(Node* node) {
@@ -784,19 +788,16 @@ Type* Typer::Visitor::TypeInductionVariablePhi(Node* node) {
Type* Typer::Visitor::TypeEffectPhi(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeLoopExit(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeLoopExitValue(Node* node) { return Operand(node, 0); }
Type* Typer::Visitor::TypeLoopExitEffect(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeEnsureWritableFastElements(Node* node) {
@@ -809,17 +810,14 @@ Type* Typer::Visitor::TypeMaybeGrowFastElements(Node* node) {
Type* Typer::Visitor::TypeTransitionElementsKind(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckpoint(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeBeginRegion(Node* node) {
UNREACHABLE();
- return nullptr;
}
@@ -1020,6 +1018,9 @@ Type* Typer::Visitor::JSShiftRightLogicalTyper(Type* lhs, Type* rhs, Typer* t) {
return NumberShiftRightLogical(ToNumber(lhs, t), ToNumber(rhs, t), t);
}
+// JS string concatenation.
+
+Type* Typer::Visitor::TypeJSStringConcat(Node* node) { return Type::String(); }
// JS arithmetic operators.
@@ -1096,6 +1097,10 @@ Type* Typer::Visitor::TypeJSToString(Node* node) {
return TypeUnaryOp(node, ToString);
}
+Type* Typer::Visitor::TypeJSToPrimitiveToString(Node* node) {
+ return TypeUnaryOp(node, ToPrimitiveToString);
+}
+
// JS object operators.
@@ -1111,7 +1116,6 @@ Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
return Type::OtherObject();
}
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
@@ -1244,29 +1248,24 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
Type* Typer::Visitor::TypeJSStoreProperty(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreNamed(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreNamedOwn(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
@@ -1277,6 +1276,11 @@ Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
// JS instanceof operator.
+Type* Typer::Visitor::JSHasInPrototypeChainTyper(Type* lhs, Type* rhs,
+ Typer* t) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
return Type::Boolean();
}
@@ -1309,7 +1313,6 @@ Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
UNREACHABLE();
- return nullptr;
}
@@ -1341,6 +1344,10 @@ Type* Typer::Visitor::TypeJSConstructForwardVarargs(Node* node) {
Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
+Type* Typer::Visitor::TypeJSConstructWithArrayLike(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
return Type::Receiver();
}
@@ -1475,6 +1482,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kTypedArrayKeys:
case kTypedArrayValues:
case kArrayIteratorNext:
+ case kMapIteratorNext:
+ case kSetIteratorNext:
return Type::OtherObject();
// Array functions.
@@ -1519,6 +1528,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kObjectCreate:
return Type::OtherObject();
case kObjectHasOwnProperty:
+ case kObjectIsPrototypeOf:
return Type::Boolean();
case kObjectToString:
return Type::String();
@@ -1534,6 +1544,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
return Type::String();
// Function functions.
+ case kFunctionBind:
+ return Type::BoundFunction();
case kFunctionHasInstance:
return Type::Boolean();
@@ -1565,7 +1577,6 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
// Set functions.
case kSetAdd:
case kSetEntries:
- case kSetKeys:
case kSetValues:
return Type::OtherObject();
case kSetClear:
@@ -1606,6 +1617,10 @@ Type* Typer::Visitor::TypeJSCall(Node* node) {
return TypeUnaryOp(node, JSCallTyper);
}
+Type* Typer::Visitor::TypeJSCallWithArrayLike(Node* node) {
+ return TypeUnaryOp(node, JSCallTyper);
+}
+
Type* Typer::Visitor::TypeJSCallWithSpread(Node* node) {
return TypeUnaryOp(node, JSCallTyper);
}
@@ -1675,19 +1690,16 @@ Type* Typer::Visitor::TypeJSLoadMessage(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSStoreMessage(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSLoadModule(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSStoreModule(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSGeneratorStore(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeJSGeneratorRestoreContinuation(Node* node) {
@@ -1791,10 +1803,18 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+Type* Typer::Visitor::TypeStringToLowerCaseIntl(Node* node) { UNREACHABLE(); }
+
+Type* Typer::Visitor::TypeStringToUpperCaseIntl(Node* node) { UNREACHABLE(); }
+
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
+Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
+ return typer_->cache_.kUint16;
+}
+
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
@@ -1828,7 +1848,6 @@ Type* Typer::Visitor::TypeCheckHeapObject(Node* node) {
Type* Typer::Visitor::TypeCheckIf(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
@@ -1838,7 +1857,6 @@ Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
Type* Typer::Visitor::TypeCheckMaps(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeCheckNumber(Node* node) {
@@ -1860,11 +1878,26 @@ Type* Typer::Visitor::TypeCheckString(Node* node) {
return Type::Intersect(arg, Type::String(), zone());
}
+Type* Typer::Visitor::TypeCheckSeqString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::SeqString(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckNonEmptyString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::NonEmptyString(), zone());
+}
+
+Type* Typer::Visitor::TypeCheckSymbol(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::Symbol(), zone());
+}
+
Type* Typer::Visitor::TypeCheckFloat64Hole(Node* node) {
return typer_->operation_typer_.CheckFloat64Hole(Operand(node, 0));
}
-Type* Typer::Visitor::TypeCheckTaggedHole(Node* node) {
+Type* Typer::Visitor::TypeCheckNotTaggedHole(Node* node) {
Type* type = Operand(node, 0);
type = Type::Intersect(type, Type::NonInternal(), zone());
return type;
@@ -1897,7 +1930,6 @@ Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return nullptr;
}
@@ -1914,29 +1946,28 @@ Type* Typer::Visitor::TypeLoadTypedElement(Node* node) {
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreField(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreBuffer(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeStoreElement(Node* node) {
UNREACHABLE();
- return nullptr;
+}
+
+Type* Typer::Visitor::TypeTransitionAndStoreElement(Node* node) {
+ UNREACHABLE();
}
Type* Typer::Visitor::TypeStoreTypedElement(Node* node) {
UNREACHABLE();
- return nullptr;
}
Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
@@ -1993,6 +2024,14 @@ Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeLookupHashStorageIndex(Node* node) {
+ return Type::SignedSmall();
+}
+
+Type* Typer::Visitor::TypeLoadHashMapValue(Node* node) {
+ return Type::NonInternal();
+}
+
// Heap constants.
Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index ef2d3a0ef6..73510d7db0 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -80,7 +80,6 @@ double Type::Min() {
if (this->IsOtherNumberConstant())
return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
- return 0;
}
double Type::Max() {
@@ -97,7 +96,6 @@ double Type::Max() {
if (this->IsOtherNumberConstant())
return this->AsOtherNumberConstant()->Value();
UNREACHABLE();
- return 0;
}
// -----------------------------------------------------------------------------
@@ -142,14 +140,11 @@ Type::bitset BitsetType::Lub(Type* type) {
if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsTuple()) return kOtherInternal;
UNREACHABLE();
- return kNone;
}
Type::bitset BitsetType::Lub(i::Map* map) {
DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
- case STRING_TYPE:
- case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
@@ -162,16 +157,20 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kOtherString;
- case INTERNALIZED_STRING_TYPE:
- case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ return kOtherNonSeqString;
+ case STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
+ return kOtherSeqString;
case EXTERNAL_INTERNALIZED_STRING_TYPE:
case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
- return kInternalizedString;
+ return kInternalizedNonSeqString;
+ case INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ return kInternalizedSeqString;
case SYMBOL_TYPE:
return kSymbol;
case ODDBALL_TYPE: {
@@ -223,8 +222,11 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
- case JS_SET_ITERATOR_TYPE:
- case JS_MAP_ITERATOR_TYPE:
+ case JS_SET_KEY_VALUE_ITERATOR_TYPE:
+ case JS_SET_VALUE_ITERATOR_TYPE:
+ case JS_MAP_KEY_ITERATOR_TYPE:
+ case JS_MAP_KEY_VALUE_ITERATOR_TYPE:
+ case JS_MAP_VALUE_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
@@ -268,6 +270,10 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_SET_TYPE:
case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
+ case WASM_MODULE_TYPE:
+ case WASM_INSTANCE_TYPE:
+ case WASM_MEMORY_TYPE:
+ case WASM_TABLE_TYPE:
DCHECK(!map->is_callable());
DCHECK(!map->is_undetectable());
return kOtherObject;
@@ -292,6 +298,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
case TRANSITION_ARRAY_TYPE:
+ case PROPERTY_ARRAY_TYPE:
case FOREIGN_TYPE:
case SCRIPT_TYPE:
case CODE_TYPE:
@@ -321,20 +328,17 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case DEBUG_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case WEAK_CELL_TYPE:
+ case SMALL_ORDERED_HASH_MAP_TYPE:
+ case SMALL_ORDERED_HASH_SET_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
- case PADDING_TYPE_1:
- case PADDING_TYPE_2:
- case PADDING_TYPE_3:
- case PADDING_TYPE_4:
+ case PREPARSED_SCOPE_DATA_TYPE:
UNREACHABLE();
- return kNone;
}
UNREACHABLE();
- return kNone;
}
Type::bitset BitsetType::Lub(i::Object* value) {
@@ -342,7 +346,11 @@ Type::bitset BitsetType::Lub(i::Object* value) {
if (value->IsNumber()) {
return Lub(value->Number());
}
- return Lub(i::HeapObject::cast(value)->map());
+ i::HeapObject* heap_value = i::HeapObject::cast(value);
+ if (value == heap_value->GetHeap()->empty_string()) {
+ return kEmptyString;
+ }
+ return Lub(heap_value->map()) & ~kEmptyString;
}
Type::bitset BitsetType::Lub(double value) {
@@ -466,6 +474,8 @@ HeapConstantType::HeapConstantType(BitsetType::bitset bitset,
: TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
DCHECK(!object->IsHeapNumber());
DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
+ DCHECK_IMPLIES(object->IsString(),
+ i::Handle<i::String>::cast(object)->length() != 0);
}
// -----------------------------------------------------------------------------
@@ -499,7 +509,6 @@ bool Type::SimplyEquals(Type* that) {
return true;
}
UNREACHABLE();
- return false;
}
// Check if [this] <= [that].
@@ -841,8 +850,13 @@ Type* Type::NewConstant(i::Handle<i::Object> value, Zone* zone) {
return Range(v, v, zone);
} else if (value->IsHeapNumber()) {
return NewConstant(value->Number(), zone);
- } else if (value->IsString() && !value->IsInternalizedString()) {
- return Type::OtherString();
+ } else if (value->IsString()) {
+ i::Isolate* isolate = i::Handle<i::HeapObject>::cast(value)->GetIsolate();
+ if (!value->IsInternalizedString()) {
+ return Type::OtherString();
+ } else if (*value == isolate->heap()->empty_string()) {
+ return Type::EmptyString();
+ }
}
return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
}
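
// Sketch of the string-constant classification that Type::NewConstant() now
// performs (plain flags instead of handles; illustration only): the empty
// string gets the new EmptyString bitset type, other internalized strings
// keep a precise HeapConstant type, and non-internalized strings fall back to
// OtherString, consistent with the new DCHECKs in HeapConstantType's
// constructor above.
enum class StringConstantKind { kOtherString, kEmptyString, kHeapConstant };

StringConstantKind ClassifyStringConstant(bool is_internalized,
                                          bool is_empty) {
  if (!is_internalized) return StringConstantKind::kOtherString;
  if (is_empty) return StringConstantKind::kEmptyString;
  return StringConstantKind::kHeapConstant;
}
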
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 452ac7658e..18df2758f2 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -105,28 +105,31 @@ namespace compiler {
V(OtherNumber, 1u << 4) \
#define PROPER_BITSET_TYPE_LIST(V) \
- V(None, 0u) \
- V(Negative31, 1u << 5) \
- V(Null, 1u << 6) \
- V(Undefined, 1u << 7) \
- V(Boolean, 1u << 8) \
- V(Unsigned30, 1u << 9) \
- V(MinusZero, 1u << 10) \
- V(NaN, 1u << 11) \
- V(Symbol, 1u << 12) \
- V(InternalizedString, 1u << 13) \
- V(OtherString, 1u << 14) \
- V(OtherCallable, 1u << 15) \
- V(OtherObject, 1u << 16) \
- V(OtherUndetectable, 1u << 17) \
- V(CallableProxy, 1u << 18) \
- V(OtherProxy, 1u << 19) \
- V(Function, 1u << 20) \
- V(BoundFunction, 1u << 21) \
- V(Hole, 1u << 22) \
- V(OtherInternal, 1u << 23) \
- V(ExternalPointer, 1u << 24) \
- V(Array, 1u << 25) \
+ V(None, 0u) \
+ V(Negative31, 1u << 5) \
+ V(Null, 1u << 6) \
+ V(Undefined, 1u << 7) \
+ V(Boolean, 1u << 8) \
+ V(Unsigned30, 1u << 9) \
+ V(MinusZero, 1u << 10) \
+ V(NaN, 1u << 11) \
+ V(Symbol, 1u << 12) \
+ V(EmptyString, 1u << 13) \
+ V(InternalizedNonEmptySeqString, 1u << 14) \
+ V(InternalizedNonSeqString, 1u << 15) \
+ V(OtherNonSeqString, 1u << 16) \
+ V(OtherSeqString, 1u << 17) \
+ V(OtherCallable, 1u << 18) \
+ V(OtherObject, 1u << 19) \
+ V(OtherUndetectable, 1u << 20) \
+ V(CallableProxy, 1u << 21) \
+ V(OtherProxy, 1u << 22) \
+ V(Function, 1u << 23) \
+ V(BoundFunction, 1u << 24) \
+ V(Hole, 1u << 25) \
+ V(OtherInternal, 1u << 26) \
+ V(ExternalPointer, 1u << 27) \
+ V(Array, 1u << 28) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -146,7 +149,17 @@ namespace compiler {
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
V(Number, kOrderedNumber | kNaN) \
- V(String, kInternalizedString | kOtherString) \
+ V(InternalizedSeqString, kEmptyString | \
+ kInternalizedNonEmptySeqString) \
+ V(InternalizedString, kInternalizedSeqString | \
+ kInternalizedNonSeqString) \
+ V(OtherString, kOtherNonSeqString | kOtherSeqString) \
+ V(SeqString, kInternalizedSeqString | kOtherSeqString) \
+ V(NonSeqString, kInternalizedNonSeqString | \
+ kOtherNonSeqString) \
+ V(NonEmptyString, kInternalizedNonEmptySeqString | \
+ kInternalizedNonSeqString| kOtherString) \
+ V(String, kNonEmptyString | kEmptyString) \
V(UniqueName, kSymbol | kInternalizedString) \
V(Name, kSymbol | kString) \
V(InternalizedStringOrNull, kInternalizedString | kNull) \
diff --git a/deps/v8/src/compiler/value-numbering-reducer.h b/deps/v8/src/compiler/value-numbering-reducer.h
index 521ce59f20..44195468c3 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.h
+++ b/deps/v8/src/compiler/value-numbering-reducer.h
@@ -19,6 +19,8 @@ class V8_EXPORT_PRIVATE ValueNumberingReducer final
explicit ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone);
~ValueNumberingReducer();
+ const char* reducer_name() const override { return "ValueNumberingReducer"; }
+
Reduction Reduce(Node* node) override;
private:
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a1310ed22f..dbb05460a2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -543,6 +543,16 @@ void Verifier::Visitor::Check(Node* node) {
// Type is 32 bit integral.
CheckTypeIs(node, Type::Integral32());
break;
+
+ case IrOpcode::kJSStringConcat:
+ // Type is string and all inputs are strings.
+ CheckTypeIs(node, Type::String());
+ for (int i = 0; i < StringConcatParameterOf(node->op()).operand_count();
+ i++) {
+ CheckValueInputIs(node, i, Type::String());
+ }
+ break;
+
case IrOpcode::kJSAdd:
// Type is Number or String.
CheckTypeIs(node, Type::NumberOrString());
@@ -575,6 +585,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kJSToString:
+ case IrOpcode::kJSToPrimitiveToString:
// Type is String.
CheckTypeIs(node, Type::String());
break;
@@ -657,6 +668,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
+ case IrOpcode::kJSHasInPrototypeChain:
case IrOpcode::kJSInstanceOf:
case IrOpcode::kJSOrdinaryHasInstance:
// Type is Boolean.
@@ -702,6 +714,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
+ case IrOpcode::kJSConstructWithArrayLike:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
@@ -709,6 +722,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSCallForwardVarargs:
case IrOpcode::kJSCall:
+ case IrOpcode::kJSCallWithArrayLike:
case IrOpcode::kJSCallWithSpread:
case IrOpcode::kJSCallRuntime:
// Type can be anything.
@@ -952,6 +966,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
+ case IrOpcode::kSeqStringCharCodeAt:
+ // (SeqString, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::SeqString());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
@@ -969,6 +989,11 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 2, Type::SignedSmall());
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kStringToLowerCaseIntl:
+ case IrOpcode::kStringToUpperCaseIntl:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kReferenceEqual:
// (Unique, Any) -> Boolean and
@@ -989,6 +1014,13 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kLookupHashStorageIndex:
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
+ case IrOpcode::kLoadHashMapValue:
+ CheckValueInputIs(node, 2, Type::SignedSmall());
+ CheckTypeIs(node, Type::SignedSmall());
+ break;
case IrOpcode::kArgumentsLength:
CheckValueInputIs(node, 0, Type::ExternalPointer());
CheckTypeIs(node, TypeCache::Get().kArgumentsLengthType);
@@ -1178,6 +1210,17 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kCheckSeqString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::SeqString());
+ break;
+ case IrOpcode::kCheckNonEmptyString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::NonEmptyString());
+ break;
+ case IrOpcode::kCheckSymbol:
+ CheckValueInputIs(node, 0, Type::Any());
+    CheckTypeIs(node, Type::Symbol());
+    break;
case IrOpcode::kCheckedInt32Add:
case IrOpcode::kCheckedInt32Sub:
@@ -1202,7 +1245,7 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::NumberOrHole());
CheckTypeIs(node, Type::NumberOrUndefined());
break;
- case IrOpcode::kCheckTaggedHole:
+ case IrOpcode::kCheckNotTaggedHole:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::NonInternal());
break;
@@ -1243,6 +1286,9 @@ void Verifier::Visitor::Check(Node* node) {
// CheckValueInputIs(node, 1, ElementAccessOf(node->op()).type));
CheckNotTyped(node);
break;
+ case IrOpcode::kTransitionAndStoreElement:
+ CheckNotTyped(node);
+ break;
case IrOpcode::kStoreTypedElement:
CheckNotTyped(node);
break;
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 56c8f6cbef..2b01c290c7 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -7,11 +7,11 @@
#include <memory>
#include "src/assembler-inl.h"
+#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stubs.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -45,16 +45,15 @@
#define WASM_64 0
#endif
+#define FATAL_UNSUPPORTED_OPCODE(opcode) \
+ V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", (opcode), \
+ wasm::WasmOpcodes::OpcodeName(opcode));
+
namespace v8 {
namespace internal {
namespace compiler {
namespace {
-const Operator* UnsupportedOpcode(wasm::WasmOpcode opcode) {
- V8_Fatal(__FILE__, __LINE__, "Unsupported opcode #%d:%s", opcode,
- wasm::WasmOpcodes::OpcodeName(opcode));
- return nullptr;
-}
void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
Graph* g = jsgraph->graph();
@@ -65,97 +64,6 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
-Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
- Node* centry_stub_node, Node** effect_ptr,
- Node* control) {
- // TODO(eholk): generate code to modify the thread-local storage directly,
- // rather than calling the runtime.
- if (!trap_handler::UseTrapHandler()) {
- return control;
- }
-
- const Runtime::FunctionId f =
- new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- DCHECK_EQ(0, fun->nargs);
- const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
- DCHECK_EQ(1, fun->result_size);
- Node* inputs[] = {centry_stub_node,
- jsgraph->ExternalConstant(
- ExternalReference(f, jsgraph->isolate())), // ref
- jsgraph->Int32Constant(fun->nargs), // arity
- jsgraph->NoContextConstant(),
- *effect_ptr,
- control};
-
- Node* node = jsgraph->graph()->NewNode(jsgraph->common()->Call(desc),
- arraysize(inputs), inputs);
- *effect_ptr = node;
- return node;
-}
-
-// Only call this function for code which is not reused across instantiations,
-// as we do not patch the embedded context.
-Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* centry_stub_node, Node* context,
- Node** parameters, int parameter_count,
- Node** effect_ptr, Node** control) {
- // Setting and clearing the thread-in-wasm flag should not be done as a normal
- // runtime call.
- DCHECK_NE(f, Runtime::kSetThreadInWasm);
- DCHECK_NE(f, Runtime::kClearThreadInWasm);
- // We're leaving Wasm code, so clear the flag.
- *control = BuildModifyThreadInWasmFlag(false, jsgraph, centry_stub_node,
- effect_ptr, *control);
-
- const Runtime::Function* fun = Runtime::FunctionForId(f);
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
- CallDescriptor::kNoFlags);
- // CEntryStubConstant nodes have to be created and cached in the main
- // thread. At the moment this is only done for CEntryStubConstant(1).
- DCHECK_EQ(1, fun->result_size);
- // At the moment we only allow 3 parameters. If more parameters are needed,
- // increase this constant accordingly.
- static const int kMaxParams = 3;
- DCHECK_GE(kMaxParams, parameter_count);
- Node* inputs[kMaxParams + 6];
- int count = 0;
- inputs[count++] = centry_stub_node;
- for (int i = 0; i < parameter_count; i++) {
- inputs[count++] = parameters[i];
- }
- inputs[count++] = jsgraph->ExternalConstant(
- ExternalReference(f, jsgraph->isolate())); // ref
- inputs[count++] = jsgraph->Int32Constant(fun->nargs); // arity
- inputs[count++] = context; // context
- inputs[count++] = *effect_ptr;
- inputs[count++] = *control;
-
- Node* node =
- jsgraph->graph()->NewNode(jsgraph->common()->Call(desc), count, inputs);
- *effect_ptr = node;
-
- // Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control = BuildModifyThreadInWasmFlag(true, jsgraph, centry_stub_node,
- effect_ptr, *control);
-
- return node;
-}
-
-Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* centry_stub_node, Node** parameters,
- int parameter_count, Node** effect_ptr,
- Node** control) {
- return BuildCallToRuntimeWithContext(f, jsgraph, centry_stub_node,
- jsgraph->NoContextConstant(), parameters,
- parameter_count, effect_ptr, control);
-}
-
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
@@ -296,6 +204,7 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
jsgraph()->ExternalConstant(
ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
jsgraph()->IntPtrConstant(0), *effect, *control);
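+  // Make the stack-limit load part of the effect chain.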
+ *effect = limit;
Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
Node* check =
@@ -303,29 +212,49 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
stack_check.Chain(*control);
- Node* effect_true = *effect;
Handle<Code> code = jsgraph()->isolate()->builtins()->WasmStackGuard();
CallInterfaceDescriptor idesc =
WasmRuntimeCallDescriptor(jsgraph()->isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
- CallDescriptor::kNoFlags, Operator::kNoProperties);
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1, Linkage::kNoContext);
Node* stub_code = jsgraph()->HeapConstant(code);
- Node* context = jsgraph()->NoContextConstant();
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- context, *effect, stack_check.if_false);
+ *effect, stack_check.if_false);
SetSourcePosition(call, position);
- Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), effect_true,
+ Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), *effect,
call, stack_check.merge);
*control = stack_check.merge;
*effect = ephi;
}
+void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
+ if (!needs_stack_check_) return;
+
+ Node* start = graph()->start();
+ // Place a stack check which uses a dummy node as control and effect.
+ Node* dummy = graph()->NewNode(jsgraph()->common()->Dead());
+ Node* control = dummy;
+ Node* effect = dummy;
+ // The function-prologue stack check is associated with position 0, which
+ // is never a position of any instruction in the function.
+ StackCheck(0, &effect, &control);
+
+  // In testing, no stack checks were emitted. Nothing to rewire then.
+ if (effect == dummy) return;
+
+ // Now patch all control uses of {start} to use {control} and all effect uses
+ // to use {effect} instead. Then rewire the dummy node to use start instead.
+ NodeProperties::ReplaceUses(start, start, effect, control);
+ NodeProperties::ReplaceUses(dummy, nullptr, start, start);
+}
+
Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
wasm::WasmCodePosition position) {
const Operator* op;
@@ -590,7 +519,7 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
case wasm::kExprF64AsmjsStoreMem:
return BuildAsmjsStoreMem(MachineType::Float64(), left, right);
default:
- op = UnsupportedOpcode(opcode);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
return graph()->NewNode(op, left, right);
}
@@ -851,7 +780,7 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
case wasm::kExprF64AsmjsLoadMem:
return BuildAsmjsLoadMem(MachineType::Float64(), input);
default:
- op = UnsupportedOpcode(opcode);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
return graph()->NewNode(op, input);
}
@@ -916,7 +845,6 @@ Builtins::Name GetBuiltinIdForTrap(bool in_cctest, wasm::TrapReason reason) {
#undef TRAPREASON_TO_MESSAGE
default:
UNREACHABLE();
- return Builtins::builtin_count;
}
}
} // namespace
@@ -1081,8 +1009,157 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
-Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
- wasm::ValueType wasmtype) {
+Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
+ MachineType memtype,
+ wasm::ValueType wasmtype) {
+ Node* result;
+ Node* value = node;
+ MachineOperatorBuilder* m = jsgraph()->machine();
+ int valueSizeInBytes = 1 << ElementSizeLog2Of(wasmtype);
+ int valueSizeInBits = 8 * valueSizeInBytes;
+ bool isFloat = false;
+
+ switch (wasmtype) {
+ case wasm::kWasmF64:
+ value = graph()->NewNode(m->BitcastFloat64ToInt64(), node);
+ isFloat = true;
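+      // Intentional fall-through: the bitcast value is handled as an i64.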
+ case wasm::kWasmI64:
+ result = jsgraph()->Int64Constant(0);
+ break;
+ case wasm::kWasmF32:
+ value = graph()->NewNode(m->BitcastFloat32ToInt32(), node);
+ isFloat = true;
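+      // Intentional fall-through: the bitcast value is handled as an i32.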
+ case wasm::kWasmI32:
+ result = jsgraph()->Int32Constant(0);
+ break;
+ case wasm::kWasmS128:
+ DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ if (memtype.representation() == MachineRepresentation::kWord8) {
+ // No need to change endianness for byte size, return original node
+ return node;
+ }
+ if (wasmtype == wasm::kWasmI64 &&
+ memtype.representation() < MachineRepresentation::kWord64) {
+    // If we only store the lower half of a WasmI64 value, the upper 32 bits
+    // can be truncated away.
+ value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
+ valueSizeInBytes = 1 << ElementSizeLog2Of(wasm::kWasmI32);
+ valueSizeInBits = 8 * valueSizeInBytes;
+ if (memtype.representation() == MachineRepresentation::kWord16) {
+ value =
+ graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
+ }
+ } else if (wasmtype == wasm::kWasmI32 &&
+ memtype.representation() == MachineRepresentation::kWord16) {
+ value =
+ graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
+ }
+
+ int i;
+ uint32_t shiftCount;
+
+ if (ReverseBytesSupported(m, valueSizeInBytes)) {
+ switch (valueSizeInBytes) {
+ case 4:
+ result = graph()->NewNode(m->Word32ReverseBytes().op(), value);
+ break;
+ case 8:
+ result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
+ break;
+ case 16: {
+ Node* byte_reversed_lanes[4];
+ for (int lane = 0; lane < 4; lane++) {
+ byte_reversed_lanes[lane] = graph()->NewNode(
+ m->Word32ReverseBytes().op(),
+ graph()->NewNode(jsgraph()->machine()->I32x4ExtractLane(lane),
+ value));
+ }
+
+ // This is making a copy of the value.
+ result =
+ graph()->NewNode(jsgraph()->machine()->S128And(), value, value);
+
+ for (int lane = 0; lane < 4; lane++) {
+ result =
+ graph()->NewNode(jsgraph()->machine()->I32x4ReplaceLane(3 - lane),
+ result, byte_reversed_lanes[lane]);
+ }
+
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
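+    // No machine ReverseBytes support for this width: swap symmetric byte
+    // pairs manually with shifts and masks, accumulating into {result}.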
+ for (i = 0, shiftCount = valueSizeInBits - 8; i < valueSizeInBits / 2;
+ i += 8, shiftCount -= 16) {
+ Node* shiftLower;
+ Node* shiftHigher;
+ Node* lowerByte;
+ Node* higherByte;
+
+ DCHECK(shiftCount > 0);
+ DCHECK((shiftCount + 8) % 16 == 0);
+
+ if (valueSizeInBits > 32) {
+ shiftLower = graph()->NewNode(m->Word64Shl(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word64Shr(), value,
+ jsgraph()->Int64Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word64And(), shiftLower,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word64And(), shiftHigher,
+ jsgraph()->Int64Constant(static_cast<uint64_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word64Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word64Or(), result, higherByte);
+ } else {
+ shiftLower = graph()->NewNode(m->Word32Shl(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ shiftHigher = graph()->NewNode(m->Word32Shr(), value,
+ jsgraph()->Int32Constant(shiftCount));
+ lowerByte = graph()->NewNode(
+ m->Word32And(), shiftLower,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF)
+ << (valueSizeInBits - 8 - i)));
+ higherByte = graph()->NewNode(
+ m->Word32And(), shiftHigher,
+ jsgraph()->Int32Constant(static_cast<uint32_t>(0xFF) << i));
+ result = graph()->NewNode(m->Word32Or(), result, lowerByte);
+ result = graph()->NewNode(m->Word32Or(), result, higherByte);
+ }
+ }
+ }
+
+ if (isFloat) {
+ switch (wasmtype) {
+ case wasm::kWasmF64:
+ result = graph()->NewNode(m->BitcastInt64ToFloat64(), result);
+ break;
+ case wasm::kWasmF32:
+ result = graph()->NewNode(m->BitcastInt32ToFloat32(), result);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ return result;
+}
+
+Node* WasmGraphBuilder::BuildChangeEndiannessLoad(Node* node,
+ MachineType memtype,
+ wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1711,11 +1788,7 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
}
Node* WasmGraphBuilder::GrowMemory(Node* input) {
- // GrowMemory will not be called from asm.js, hence we cannot be in
- // lazy-compilation mode, hence the instance will be set.
- DCHECK_NOT_NULL(module_);
- DCHECK_NOT_NULL(module_->instance);
-
+ SetNeedsStackCheck();
Diamond check_input_range(
graph(), jsgraph()->common(),
graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
@@ -1726,9 +1799,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Node* parameters[] = {BuildChangeUint32ToSmi(input)};
Node* old_effect = *effect_;
- Node* call = BuildCallToRuntime(
- Runtime::kWasmGrowMemory, jsgraph(), centry_stub_node_, parameters,
- arraysize(parameters), effect_, &check_input_range.if_true);
+ *control_ = check_input_range.if_true;
+ Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, parameters,
+ arraysize(parameters));
Node* result = BuildChangeSmiToInt32(call);
@@ -1741,6 +1814,7 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
}
Node* WasmGraphBuilder::Throw(Node* input) {
+ SetNeedsStackCheck();
MachineOperatorBuilder* machine = jsgraph()->machine();
// Pass the thrown value as two SMIs:
@@ -1758,18 +1832,17 @@ Node* WasmGraphBuilder::Throw(Node* input) {
graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
Node* parameters[] = {lower, upper}; // thrown value
- return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), centry_stub_node_,
- parameters, arraysize(parameters), effect_,
- control_);
+ return BuildCallToRuntime(Runtime::kWasmThrow, parameters,
+ arraysize(parameters));
}
Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
+ SetNeedsStackCheck();
CommonOperatorBuilder* common = jsgraph()->common();
Node* parameters[] = {input}; // caught value
Node* value = BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue,
- jsgraph(), centry_stub_node_, parameters,
- arraysize(parameters), effect_, control_);
+ parameters, arraysize(parameters));
Node* is_smi;
Node* is_heap;
@@ -2127,8 +2200,6 @@ Node* WasmGraphBuilder::BuildDiv64Call(Node* left, Node* right,
Node* call = BuildCCall(sig_builder.Build(), args);
- // TODO(wasm): This can get simpler if we have a specialized runtime call to
- // throw WASM exceptions by trap code instead of by string.
ZeroCheck32(static_cast<wasm::TrapReason>(trap_zero), call, position);
TrapIfEq32(wasm::kTrapDivUnrepresentable, call, -1, position);
const Operator* load_op = jsgraph()->machine()->Load(result_type);
@@ -2163,6 +2234,7 @@ Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
Node*** rets,
wasm::WasmCodePosition position) {
+ SetNeedsStackCheck();
const size_t params = sig->parameter_count();
const size_t extra = 2; // effect and control inputs.
const size_t count = 1 + params + extra;
@@ -2174,8 +2246,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
args[params + 1] = *effect_;
args[params + 2] = *control_;
- CallDescriptor* descriptor =
- wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ CallDescriptor* descriptor = GetWasmCallDescriptor(jsgraph()->zone(), sig);
const Operator* op = jsgraph()->common()->Call(descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
SetSourcePosition(call, position);
@@ -2414,12 +2485,12 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
return jsgraph()->UndefinedConstant();
default:
UNREACHABLE();
- return nullptr;
}
}
Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
- Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ Callable callable =
+ Builtins::CallableFor(jsgraph()->isolate(), Builtins::kToNumber);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
CallDescriptor::kNoFlags, Operator::kNoProperties);
@@ -2515,7 +2586,6 @@ Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
break;
default:
UNREACHABLE();
- return nullptr;
}
return num;
}
@@ -2619,16 +2689,14 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
graph()->start());
// Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(true);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the context of the calling javascript function
// (passed as a parameter), such that the generated code is context
// independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- centry_stub_node_, context, nullptr, 0,
- effect_, control_);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ nullptr, 0);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2640,8 +2708,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// We only need a dummy call descriptor.
wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
- CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
- jsgraph()->zone(), dummy_sig_builder.Build());
+ CallDescriptor* desc =
+ GetWasmCallDescriptor(jsgraph()->zone(), dummy_sig_builder.Build());
*effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
Return(jsgraph()->UndefinedConstant());
return;
@@ -2650,7 +2718,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
int pos = 0;
args[pos++] = HeapConstant(wasm_code);
- // Convert JS parameters to WASM numbers.
+ // Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
Node* param = Param(i + 1);
Node* wasm_param = FromJS(param, context, sig->GetParam(i));
@@ -2660,16 +2728,14 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
args[pos++] = *effect_;
args[pos++] = *control_;
- // Call the WASM code.
- CallDescriptor* desc =
- wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ // Call the wasm code.
+ CallDescriptor* desc = GetWasmCallDescriptor(jsgraph()->zone(), sig);
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
*effect_ = call;
// Clear the ThreadInWasmFlag
- BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(false);
Node* retval = call;
Node* jsval = ToJS(
@@ -2679,7 +2745,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig) {
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
int param_index = 0;
for (int i = 0; i < param_count; ++i) {
Node* param = Param(param_index++);
@@ -2706,9 +2772,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
// regenerated at instantiation time.
Node* context =
jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- centry_stub_node_, context, nullptr, 0,
- effect_, control_);
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
+ nullptr, 0);
// We don't need to return a value here, as the runtime call will not return
// anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
@@ -2719,8 +2784,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* call = nullptr;
- BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(false);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
@@ -2740,7 +2804,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
desc = Linkage::GetJSCallDescriptor(
graph()->zone(), false, wasm_count + 1, CallDescriptor::kNoFlags);
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig);
args[pos++] = jsgraph()->UndefinedConstant(); // new target
@@ -2767,7 +2831,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
callable.descriptor(), wasm_count + 1,
CallDescriptor::kNoFlags);
- // Convert WASM numbers to JS values.
+ // Convert wasm numbers to JS values.
pos = AddParameterNodes(args, pos, wasm_count, sig);
// The native_context is sufficient here, because all kind of callables
@@ -2785,8 +2849,7 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = call;
SetSourcePosition(call, 0);
- BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
- *control_);
+ BuildModifyThreadInWasmFlag(true);
// Convert the return value back.
Node* val = sig->return_count() == 0
@@ -2823,10 +2886,11 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
// Get a stack slot for the arguments.
- Node* arg_buffer = args_size_bytes == 0 && return_size_bytes == 0
- ? jsgraph()->IntPtrConstant(0)
- : graph()->NewNode(jsgraph()->machine()->StackSlot(
- std::max(args_size_bytes, return_size_bytes)));
+ Node* arg_buffer =
+ args_size_bytes == 0 && return_size_bytes == 0
+ ? jsgraph()->IntPtrConstant(0)
+ : graph()->NewNode(jsgraph()->machine()->StackSlot(
+ std::max(args_size_bytes, return_size_bytes), 8));
// Now store all our arguments to the buffer.
int param_index = 0;
@@ -2836,26 +2900,23 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
Node* param = Param(param_index++);
if (Int64Lowering::IsI64AsTwoParameters(jsgraph()->machine(),
sig->GetParam(i))) {
- StoreRepresentation store_rep(wasm::kWasmI32,
- WriteBarrierKind::kNoWriteBarrier);
- *effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(offset + kInt64LowerHalfMemoryOffset),
- param, *effect_, *control_);
+ int lower_half_offset = offset + kInt64LowerHalfMemoryOffset;
+ int upper_half_offset = offset + kInt64UpperHalfMemoryOffset;
+
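+      // Store the two i32 halves of the i64 parameter individually.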
+ *effect_ = graph()->NewNode(
+ GetSafeStoreOperator(lower_half_offset, wasm::kWasmI32), arg_buffer,
+ Int32Constant(lower_half_offset), param, *effect_, *control_);
param = Param(param_index++);
- *effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
- Int32Constant(offset + kInt64UpperHalfMemoryOffset),
- param, *effect_, *control_);
+ *effect_ = graph()->NewNode(
+ GetSafeStoreOperator(upper_half_offset, wasm::kWasmI32), arg_buffer,
+ Int32Constant(upper_half_offset), param, *effect_, *control_);
offset += 8;
} else {
MachineRepresentation param_rep = sig->GetParam(i);
- StoreRepresentation store_rep(param_rep,
- WriteBarrierKind::kNoWriteBarrier);
*effect_ =
- graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ graph()->NewNode(GetSafeStoreOperator(offset, param_rep), arg_buffer,
Int32Constant(offset), param, *effect_, *control_);
offset += 1 << ElementSizeLog2Of(param_rep);
}
@@ -2871,8 +2932,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
jsgraph()->SmiConstant(function_index), // function index
arg_buffer, // argument buffer
};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), centry_stub_node_,
- parameters, arraysize(parameters), effect_, control_);
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, parameters,
+ arraysize(parameters));
// Read back the return value.
if (sig->return_count() == 0) {
@@ -2911,17 +2972,16 @@ Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
return mem_buffer_;
} else {
return jsgraph()->RelocatableIntPtrConstant(
- mem_start + offset, RelocInfo::WASM_MEMORY_REFERENCE);
+ static_cast<uintptr_t>(mem_start + offset),
+ RelocInfo::WASM_MEMORY_REFERENCE);
}
}
Node* WasmGraphBuilder::CurrentMemoryPages() {
- // CurrentMemoryPages will not be called from asm.js, hence we cannot be in
- // lazy-compilation mode, hence the instance will be set.
- DCHECK_EQ(wasm::kWasmOrigin, module_->module->get_origin());
- Node* call =
- BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), centry_stub_node_,
- nullptr, 0, effect_, control_);
+  // CurrentMemoryPages cannot be called from asm.js.
+ DCHECK_EQ(wasm::kWasmOrigin, module_->module->origin());
+ SetNeedsStackCheck();
+ Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, nullptr, 0);
Node* result = BuildChangeSmiToInt32(call);
return result;
}
@@ -2954,6 +3014,91 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
}
}
+Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
+ // TODO(eholk): generate code to modify the thread-local storage directly,
+ // rather than calling the runtime.
+ if (!trap_handler::UseTrapHandler()) {
+ return *control_;
+ }
+
+ const Runtime::FunctionId f =
+ new_value ? Runtime::kSetThreadInWasm : Runtime::kClearThreadInWasm;
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ DCHECK_EQ(0, fun->nargs);
+ const CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ Node* inputs[] = {centry_stub_node_,
+ jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())), // ref
+ jsgraph()->Int32Constant(fun->nargs), // arity
+ jsgraph()->NoContextConstant(),
+ *effect_,
+ *control_};
+
+ Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
+ arraysize(inputs), inputs);
+ *effect_ = node;
+ return node;
+}
+
+// Only call this function for code which is not reused across instantiations,
+// as we do not patch the embedded context.
+Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(Runtime::FunctionId f,
+ Node* context,
+ Node** parameters,
+ int parameter_count) {
+ // Setting and clearing the thread-in-wasm flag should not be done as a normal
+ // runtime call.
+ DCHECK_NE(f, Runtime::kSetThreadInWasm);
+ DCHECK_NE(f, Runtime::kClearThreadInWasm);
+ // We're leaving Wasm code, so clear the flag.
+ *control_ = BuildModifyThreadInWasmFlag(false);
+
+ const Runtime::Function* fun = Runtime::FunctionForId(f);
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ jsgraph()->zone(), f, fun->nargs, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ // CEntryStubConstant nodes have to be created and cached in the main
+ // thread. At the moment this is only done for CEntryStubConstant(1).
+ DCHECK_EQ(1, fun->result_size);
+ // At the moment we only allow 3 parameters. If more parameters are needed,
+ // increase this constant accordingly.
+ static const int kMaxParams = 3;
+ DCHECK_GE(kMaxParams, parameter_count);
+ Node* inputs[kMaxParams + 6];
+ int count = 0;
+ inputs[count++] = centry_stub_node_;
+ for (int i = 0; i < parameter_count; i++) {
+ inputs[count++] = parameters[i];
+ }
+ inputs[count++] = jsgraph()->ExternalConstant(
+ ExternalReference(f, jsgraph()->isolate())); // ref
+ inputs[count++] = jsgraph()->Int32Constant(fun->nargs); // arity
+ inputs[count++] = context; // context
+ inputs[count++] = *effect_;
+ inputs[count++] = *control_;
+
+ Node* node = jsgraph()->graph()->NewNode(jsgraph()->common()->Call(desc),
+ count, inputs);
+ *effect_ = node;
+
+ // Restore the thread-in-wasm flag, since we have returned to Wasm.
+ *control_ = BuildModifyThreadInWasmFlag(true);
+
+ return node;
+}
+
+Node* WasmGraphBuilder::BuildCallToRuntime(Runtime::FunctionId f,
+ Node** parameters,
+ int parameter_count) {
+ return BuildCallToRuntimeWithContext(f, jsgraph()->NoContextConstant(),
+ parameters, parameter_count);
+}
+
Node* WasmGraphBuilder::GetGlobal(uint32_t index) {
MachineType mem_type =
wasm::WasmOpcodes::MachineTypeFor(module_->GetGlobalType(index));
@@ -3042,13 +3187,25 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
+const Operator* WasmGraphBuilder::GetSafeStoreOperator(int offset,
+ wasm::ValueType type) {
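+  // Emit a plain store when the offset is naturally aligned for {type} or the
+  // machine supports unaligned stores; otherwise fall back to UnalignedStore.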
+ int alignment = offset % (1 << ElementSizeLog2Of(type));
+ if (alignment == 0 || jsgraph()->machine()->UnalignedStoreSupported(
+ MachineType::TypeForRepresentation(type), 0)) {
+ StoreRepresentation rep(type, WriteBarrierKind::kNoWriteBarrier);
+ return jsgraph()->machine()->Store(rep);
+ }
+ UnalignedStoreRepresentation rep(type);
+ return jsgraph()->machine()->UnalignedStore(rep);
+}
+
Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
- // WASM semantics throw on OOB. Introduce explicit bounds check.
+ // Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
@@ -3075,7 +3232,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
*effect_ = load;
#if defined(V8_TARGET_BIG_ENDIAN)
- load = BuildChangeEndianness(load, memtype, type);
+ load = BuildChangeEndiannessLoad(load, memtype, type);
#endif
if (type == wasm::kWasmI64 &&
@@ -3094,19 +3251,19 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-
Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
- wasm::WasmCodePosition position) {
+ wasm::WasmCodePosition position,
+ wasm::ValueType type) {
Node* store;
- // WASM semantics throw on OOB. Introduce explicit bounds check.
+ // Wasm semantics throw on OOB. Introduce explicit bounds check.
if (!FLAG_wasm_trap_handler || !V8_TRAP_HANDLER_SUPPORTED) {
BoundsCheckMem(memtype, index, offset, position);
}
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndianness(val, memtype);
+ val = BuildChangeEndiannessStore(val, memtype, type);
#endif
if (memtype.representation() == MachineRepresentation::kWord8 ||
@@ -3193,23 +3350,7 @@ Node* WasmGraphBuilder::S128Zero() {
return graph()->NewNode(jsgraph()->machine()->S128Zero());
}
-Node* WasmGraphBuilder::S1x4Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x4Zero());
-}
-
-Node* WasmGraphBuilder::S1x8Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x8Zero());
-}
-
-Node* WasmGraphBuilder::S1x16Zero() {
- has_simd_ = true;
- return graph()->NewNode(jsgraph()->machine()->S1x16Zero());
-}
-
-Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
- const NodeVector& inputs) {
+Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4Splat:
@@ -3307,17 +3448,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I32x4Ne(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtS(), inputs[1],
+ inputs[0]);
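+      // Lt/Le comparisons are expressed as Gt/Ge with swapped operands, here
+      // and in the cases below.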
case wasm::kExprI32x4LeS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI32x4GtS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI32x4GtS:
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GeS:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4UConvertI16x8Low:
return graph()->NewNode(jsgraph()->machine()->I32x4UConvertI16x8Low(),
inputs[0]);
@@ -3331,17 +3472,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I32x4MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI32x4LtU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI32x4LeU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI32x4GtU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI32x4GtU:
+ return graph()->NewNode(jsgraph()->machine()->I32x4GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4GeU:
- return graph()->NewNode(jsgraph()->machine()->I32x4LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I32x4GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Splat:
return graph()->NewNode(jsgraph()->machine()->I16x8Splat(), inputs[0]);
case wasm::kExprI16x8SConvertI8x16Low:
@@ -3386,17 +3527,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I16x8Ne(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8LeS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI16x8GtS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI16x8GtS:
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GeS:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8UConvertI8x16Low:
return graph()->NewNode(jsgraph()->machine()->I16x8UConvertI8x16Low(),
inputs[0]);
@@ -3419,17 +3560,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I16x8MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI16x8LtU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI16x8LeU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI16x8GtU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI16x8GtU:
+ return graph()->NewNode(jsgraph()->machine()->I16x8GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8GeU:
- return graph()->NewNode(jsgraph()->machine()->I16x8LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I16x8GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16Splat:
return graph()->NewNode(jsgraph()->machine()->I8x16Splat(), inputs[0]);
case wasm::kExprI8x16Neg:
@@ -3465,17 +3606,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I8x16Ne(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtS(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16LeS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[0],
- inputs[1]);
- case wasm::kExprI8x16GtS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtS(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeS(), inputs[1],
inputs[0]);
+ case wasm::kExprI8x16GtS:
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GeS:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeS(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeS(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16UConvertI16x8:
return graph()->NewNode(jsgraph()->machine()->I8x16UConvertI16x8(),
inputs[0], inputs[1]);
@@ -3492,17 +3633,17 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->I8x16MaxU(), inputs[0],
inputs[1]);
case wasm::kExprI8x16LtU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[0],
- inputs[1]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtU(), inputs[1],
+ inputs[0]);
case wasm::kExprI8x16LeU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[0],
- inputs[1]);
- case wasm::kExprI8x16GtU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LtU(), inputs[1],
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeU(), inputs[1],
inputs[0]);
+ case wasm::kExprI8x16GtU:
+ return graph()->NewNode(jsgraph()->machine()->I8x16GtU(), inputs[0],
+ inputs[1]);
case wasm::kExprI8x16GeU:
- return graph()->NewNode(jsgraph()->machine()->I8x16LeU(), inputs[1],
- inputs[0]);
+ return graph()->NewNode(jsgraph()->machine()->I8x16GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprS128And:
return graph()->NewNode(jsgraph()->machine()->S128And(), inputs[0],
inputs[1]);
@@ -3514,67 +3655,28 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
inputs[1]);
case wasm::kExprS128Not:
return graph()->NewNode(jsgraph()->machine()->S128Not(), inputs[0]);
- case wasm::kExprS32x4Select:
- return graph()->NewNode(jsgraph()->machine()->S32x4Select(), inputs[0],
- inputs[1], inputs[2]);
- case wasm::kExprS16x8Select:
- return graph()->NewNode(jsgraph()->machine()->S16x8Select(), inputs[0],
- inputs[1], inputs[2]);
- case wasm::kExprS8x16Select:
- return graph()->NewNode(jsgraph()->machine()->S8x16Select(), inputs[0],
+ case wasm::kExprS128Select:
+ return graph()->NewNode(jsgraph()->machine()->S128Select(), inputs[0],
inputs[1], inputs[2]);
- case wasm::kExprS1x4And:
- return graph()->NewNode(jsgraph()->machine()->S1x4And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Or:
- return graph()->NewNode(jsgraph()->machine()->S1x4Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x4Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x4Not:
- return graph()->NewNode(jsgraph()->machine()->S1x4Not(), inputs[0]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x4AllTrue(), inputs[0]);
- case wasm::kExprS1x8And:
- return graph()->NewNode(jsgraph()->machine()->S1x8And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Or:
- return graph()->NewNode(jsgraph()->machine()->S1x8Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x8Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x8Not:
- return graph()->NewNode(jsgraph()->machine()->S1x8Not(), inputs[0]);
case wasm::kExprS1x8AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x8AnyTrue(), inputs[0]);
case wasm::kExprS1x8AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x8AllTrue(), inputs[0]);
- case wasm::kExprS1x16And:
- return graph()->NewNode(jsgraph()->machine()->S1x16And(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Or:
- return graph()->NewNode(jsgraph()->machine()->S1x16Or(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Xor:
- return graph()->NewNode(jsgraph()->machine()->S1x16Xor(), inputs[0],
- inputs[1]);
- case wasm::kExprS1x16Not:
- return graph()->NewNode(jsgraph()->machine()->S1x16Not(), inputs[0]);
case wasm::kExprS1x16AnyTrue:
return graph()->NewNode(jsgraph()->machine()->S1x16AnyTrue(), inputs[0]);
case wasm::kExprS1x16AllTrue:
return graph()->NewNode(jsgraph()->machine()->S1x16AllTrue(), inputs[0]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
- const NodeVector& inputs) {
+ Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprF32x4ExtractLane:
@@ -3602,12 +3704,12 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
return graph()->NewNode(jsgraph()->machine()->I8x16ReplaceLane(lane),
inputs[0], inputs[1]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
- const NodeVector& inputs) {
+ Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4Shl:
@@ -3635,27 +3737,15 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
return graph()->NewNode(jsgraph()->machine()->I8x16ShrU(shift),
inputs[0]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ FATAL_UNSUPPORTED_OPCODE(opcode);
}
}
-Node* WasmGraphBuilder::SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
- const NodeVector& inputs) {
+Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
+ Node* const* inputs) {
has_simd_ = true;
- switch (lanes) {
- case 4:
- return graph()->NewNode(jsgraph()->machine()->S32x4Shuffle(shuffle),
- inputs[0], inputs[1]);
- case 8:
- return graph()->NewNode(jsgraph()->machine()->S16x8Shuffle(shuffle),
- inputs[0], inputs[1]);
- case 16:
- return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
- inputs[0], inputs[1]);
- default:
- UNREACHABLE();
- return nullptr;
- }
+ return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
+ inputs[0], inputs[1]);
}
static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
@@ -3672,7 +3762,7 @@ static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<String> name_str =
isolate->factory()->NewStringFromAsciiChecked(buffer.start());
Handle<String> script_str =
- isolate->factory()->NewStringFromAsciiChecked("(WASM)");
+ isolate->factory()->NewStringFromAsciiChecked("(wasm)");
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *shared,
@@ -3793,10 +3883,9 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
// Schedule and compile to machine code.
- CallDescriptor* incoming =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
if (machine.Is32()) {
- incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
Code::Flags flags = Code::ComputeFlags(Code::WASM_TO_JS_FUNCTION);
bool debugging =
@@ -3854,7 +3943,10 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Zone zone(isolate->allocator(), ZONE_NAME);
Graph graph(&zone);
CommonOperatorBuilder common(&zone);
- MachineOperatorBuilder machine(&zone);
+ MachineOperatorBuilder machine(
+ &zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements());
JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
@@ -3875,10 +3967,9 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
}
// Schedule and compile to machine code.
- CallDescriptor* incoming =
- wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ CallDescriptor* incoming = GetWasmCallDescriptor(&zone, sig);
if (machine.Is32()) {
- incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
EmbeddedVector<char, 32> debug_name;
@@ -3981,135 +4072,152 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function,
- bool is_sync)
+ Handle<Code> centry_stub)
: WasmCompilationUnit(
isolate, &module_env->module_env,
wasm::FunctionBody{
- function->sig, module_env->wire_bytes.start(),
- module_env->wire_bytes.start() + function->code_start_offset,
- module_env->wire_bytes.start() + function->code_end_offset},
+ function->sig, function->code.offset(),
+ module_env->wire_bytes.start() + function->code.offset(),
+ module_env->wire_bytes.start() + function->code.end_offset()},
module_env->wire_bytes.GetNameOrNull(function), function->func_index,
- is_sync) {}
+ centry_stub) {}
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleEnv* module_env,
wasm::FunctionBody body,
wasm::WasmName name, int index,
- bool is_sync)
+ Handle<Code> centry_stub)
: isolate_(isolate),
module_env_(module_env),
func_body_(body),
func_name_(name),
- is_sync_(is_sync),
- centry_stub_(CEntryStub(isolate, 1).GetCode()),
+ counters_(isolate->counters()),
+ centry_stub_(centry_stub),
+ func_index_(index) {}
+
+WasmCompilationUnit::WasmCompilationUnit(
+ Isolate* isolate, wasm::ModuleBytesEnv* module_env,
+ const wasm::WasmFunction* function, Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters)
+ : WasmCompilationUnit(
+ isolate, &module_env->module_env,
+ wasm::FunctionBody{
+ function->sig, function->code.offset(),
+ module_env->wire_bytes.start() + function->code.offset(),
+ module_env->wire_bytes.start() + function->code.end_offset()},
+ module_env->wire_bytes.GetNameOrNull(function), function->func_index,
+ centry_stub, async_counters) {}
+
+WasmCompilationUnit::WasmCompilationUnit(
+ Isolate* isolate, wasm::ModuleEnv* module_env, wasm::FunctionBody body,
+ wasm::WasmName name, int index, Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters)
+ : isolate_(isolate),
+ module_env_(module_env),
+ func_body_(body),
+ func_name_(name),
+ counters_(async_counters.get()),
+ centry_stub_(centry_stub),
func_index_(index) {}
void WasmCompilationUnit::ExecuteCompilation() {
- if (is_sync_) {
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- HistogramTimerScope wasm_compile_function_time_scope(
- isolate_->counters()->wasm_compile_function_time());
- ExecuteCompilationInternal();
- }
-  ExecuteCompilationInternal();
- // Record the memory cost this unit places on the system until
- // it is finalized. That may be "0" in error cases.
- if (job_) {
- size_t cost = job_->AllocatedMemory();
- set_memory_cost(cost);
- }
-}
+ TimedHistogramScope wasm_compile_function_time_scope(
+ counters()->wasm_compile_function_time());
-void WasmCompilationUnit::ExecuteCompilationInternal() {
if (FLAG_trace_wasm_compiler) {
if (func_name_.start() != nullptr) {
- PrintF("Compiling WASM function %d:'%.*s'\n\n", func_index(),
+ PrintF("Compiling wasm function %d:'%.*s'\n\n", func_index(),
func_name_.length(), func_name_.start());
} else {
- PrintF("Compiling WASM function %d:<unnamed>\n\n", func_index());
+ PrintF("Compiling wasm function %d:<unnamed>\n\n", func_index());
}
}
double decode_ms = 0;
size_t node_count = 0;
- Zone graph_zone(isolate_->allocator(), ZONE_NAME);
- jsgraph_ = new (&graph_zone) JSGraph(
- isolate_, new (&graph_zone) Graph(&graph_zone),
- new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
- new (&graph_zone) MachineOperatorBuilder(
- &graph_zone, MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()));
- SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
+ // Scope for the {graph_zone}.
+ {
+ Zone graph_zone(isolate_->allocator(), ZONE_NAME);
+ jsgraph_ = new (&graph_zone) JSGraph(
+ isolate_, new (&graph_zone) Graph(&graph_zone),
+ new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
+ new (&graph_zone) MachineOperatorBuilder(
+ &graph_zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
+ SourcePositionTable* source_positions =
+ BuildGraphForWasmFunction(&decode_ms);
- if (graph_construction_result_.failed()) {
- ok_ = false;
- return;
- }
+ if (graph_construction_result_.failed()) {
+ ok_ = false;
+ return;
+ }
- base::ElapsedTimer pipeline_timer;
- if (FLAG_trace_wasm_decode_time) {
- node_count = jsgraph_->graph()->NodeCount();
- pipeline_timer.Start();
- }
+ base::ElapsedTimer pipeline_timer;
+ if (FLAG_trace_wasm_decode_time) {
+ node_count = jsgraph_->graph()->NodeCount();
+ pipeline_timer.Start();
+ }
- compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
+ compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
- // Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- compilation_zone_.get(), func_body_.sig);
- if (jsgraph_->machine()->Is32()) {
- descriptor = module_env_->GetI32WasmCallDescriptor(compilation_zone_.get(),
- descriptor);
- }
- info_.reset(new CompilationInfo(
- GetDebugName(compilation_zone_.get(), func_name_, func_index_), isolate_,
- compilation_zone_.get(), Code::ComputeFlags(Code::WASM_FUNCTION)));
- ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
- compilation_zone_.get());
-
- job_.reset(Pipeline::NewWasmCompilationJob(
- info_.get(), jsgraph_, descriptor, source_positions,
- &protected_instructions, !module_env_->module->is_wasm()));
- ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
- // TODO(bradnelson): Improve histogram handling of size_t.
- if (is_sync_)
- // TODO(karlschimpf): Make this work when asynchronous.
- // https://bugs.chromium.org/p/v8/issues/detail?id=6361
- isolate_->counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
+ // Run the compiler pipeline to generate machine code.
+ CallDescriptor* descriptor =
+ GetWasmCallDescriptor(compilation_zone_.get(), func_body_.sig);
+ if (jsgraph_->machine()->Is32()) {
+ descriptor =
+ GetI32WasmCallDescriptor(compilation_zone_.get(), descriptor);
+ }
+ info_.reset(new CompilationInfo(
+ GetDebugName(compilation_zone_.get(), func_name_, func_index_),
+ isolate_, compilation_zone_.get(),
+ Code::ComputeFlags(Code::WASM_FUNCTION)));
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
+ compilation_zone_.get());
+
+ job_.reset(Pipeline::NewWasmCompilationJob(
+ info_.get(), jsgraph_, descriptor, source_positions,
+ &protected_instructions, module_env_->module->origin()));
+ ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
+ // TODO(bradnelson): Improve histogram handling of size_t.
+ counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
static_cast<int>(jsgraph_->graph()->zone()->allocation_size()));
- if (FLAG_trace_wasm_decode_time) {
- double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
- PrintF(
- "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
- "%0.3f ms pipeline\n",
- static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
- node_count, pipeline_ms);
+ if (FLAG_trace_wasm_decode_time) {
+ double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
+ PrintF(
+ "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
+ "%0.3f ms pipeline\n",
+ static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
+ node_count, pipeline_ms);
+ }
+ // The graph zone is about to get out of scope. Avoid invalid references.
+ jsgraph_ = nullptr;
}
- // The graph zone is about to get out of scope. Avoid invalid references.
- jsgraph_ = nullptr;
+
+ // Record the memory cost this unit places on the system until
+ // it is finalized.
+ size_t cost = job_->AllocatedMemory();
+ set_memory_cost(cost);
}
-Handle<Code> WasmCompilationUnit::FinishCompilation(
+MaybeHandle<Code> WasmCompilationUnit::FinishCompilation(
wasm::ErrorThrower* thrower) {
if (!ok_) {
if (graph_construction_result_.failed()) {
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
if (func_name_.start() == nullptr) {
- SNPrintF(buffer,
- "Compiling WASM function #%d:%.*s failed:", func_index_,
- func_name_.length(), func_name_.start());
+ SNPrintF(buffer, "Compiling wasm function #%d failed", func_index_);
} else {
- SNPrintF(buffer, "Compiling WASM function #%d failed:", func_index_);
+ SNPrintF(buffer, "Compiling wasm function #%d:%.*s failed", func_index_,
+ func_name_.length(), func_name_.start());
}
thrower->CompileFailed(buffer.start(), graph_construction_result_);
}
- return Handle<Code>::null();
+ return {};
}
base::ElapsedTimer codegen_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -4139,10 +4247,11 @@ Handle<Code> WasmCompilationUnit::FinishCompilation(
}
// static
-Handle<Code> WasmCompilationUnit::CompileWasmFunction(
+MaybeHandle<Code> WasmCompilationUnit::CompileWasmFunction(
wasm::ErrorThrower* thrower, Isolate* isolate,
wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(isolate, module_env, function);
+ WasmCompilationUnit unit(isolate, module_env, function,
+ CEntryStub(isolate, 1).GetCode());
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
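
Note on the return-type change above: FinishCompilation and CompileWasmFunction now return MaybeHandle<Code> instead of a possibly-null Handle<Code>, so callers check the result explicitly rather than comparing against a null handle. A minimal caller-side sketch (illustration only; everything other than the two APIs shown in this hunk is an assumption):

    // Hypothetical caller, not part of the patch.
    wasm::ErrorThrower thrower(isolate, "wasm-compile");
    MaybeHandle<Code> maybe_code = WasmCompilationUnit::CompileWasmFunction(
        &thrower, isolate, module_env, function);
    Handle<Code> code;
    if (!maybe_code.ToHandle(&code)) {
      // Compilation failed; the thrower already carries the error context.
      return;
    }
    // ... use {code} ...
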
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index f356f624d7..bf763d4499 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -31,7 +31,7 @@ class SourcePositionTable;
} // namespace compiler
namespace wasm {
-// Forward declarations for some WASM data structures.
+// Forward declarations for some wasm data structures.
struct ModuleBytesEnv;
struct ModuleEnv;
struct WasmFunction;
@@ -47,23 +47,33 @@ typedef compiler::JSGraph TFGraph;
namespace compiler {
class WasmCompilationUnit final {
public:
+ // Use the following constructors if you know you are running on the
+ // foreground thread.
WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function, bool is_sync = true);
+ const wasm::WasmFunction* function,
+ Handle<Code> centry_stub);
WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
wasm::FunctionBody body, wasm::WasmName name, int index,
- bool is_sync = true);
+ Handle<Code> centry_stub);
+ // Use the following constructors if the compilation may run on a background
+ // thread.
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
+ const wasm::WasmFunction* function,
+ Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters);
+ WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
+ wasm::FunctionBody body, wasm::WasmName name, int index,
+ Handle<Code> centry_stub,
+ const std::shared_ptr<Counters>& async_counters);
int func_index() const { return func_index_; }
- void ReopenCentryStub() { centry_stub_ = handle(*centry_stub_, isolate_); }
- void InitializeHandles();
void ExecuteCompilation();
- Handle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
+ MaybeHandle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
- static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
- Isolate* isolate,
- wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function);
+ static MaybeHandle<Code> CompileWasmFunction(
+ wasm::ErrorThrower* thrower, Isolate* isolate,
+ wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function);
void set_memory_cost(size_t memory_cost) { memory_cost_ = memory_cost; }
size_t memory_cost() const { return memory_cost_; }
@@ -75,7 +85,7 @@ class WasmCompilationUnit final {
wasm::ModuleEnv* module_env_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
- bool is_sync_;
+ Counters* counters_;
// The graph zone is deallocated at the end of ExecuteCompilation by virtue of
// it being zone allocated.
JSGraph* jsgraph_ = nullptr;
@@ -90,12 +100,13 @@ class WasmCompilationUnit final {
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_ = true;
size_t memory_cost_ = 0;
- void ExecuteCompilationInternal();
+
+ Counters* counters() { return counters_; }
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
-// Wraps a JS function, producing a code object that can be called from WASM.
+// Wraps a JS function, producing a code object that can be called from wasm.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
@@ -113,9 +124,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
wasm::FunctionSig* sig,
Handle<WasmInstanceObject> instance);
-// Abstracts details of building TurboFan graph nodes for WASM to separate
-// the WASM decoder from the internal details of TurboFan.
-class WasmTrapHelper;
+// Abstracts details of building TurboFan graph nodes for wasm to separate
+// the wasm decoder from the internal details of TurboFan.
typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
@@ -168,6 +178,8 @@ class WasmGraphBuilder {
void StackCheck(wasm::WasmCodePosition position, Node** effect = nullptr,
Node** control = nullptr);
+ void PatchInStackCheckIfNeeded();
+
//-----------------------------------------------------------------------
// Operations that read and/or write {control} and {effect}.
//-----------------------------------------------------------------------
@@ -224,10 +236,9 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineType type, Node* index, uint32_t offset,
- uint32_t alignment, Node* val,
- wasm::WasmCodePosition position);
-
+ Node* StoreMem(MachineType memtype, Node* index, uint32_t offset,
+ uint32_t alignment, Node* val, wasm::WasmCodePosition position,
+ wasm::ValueType type = wasm::kWasmStmt);
static void PrintDebugName(Node* node);
Node* Control() { return *control_; }
@@ -250,16 +261,14 @@ class WasmGraphBuilder {
Node* S1x8Zero();
Node* S1x16Zero();
- Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
+ Node* SimdOp(wasm::WasmOpcode opcode, Node* const* inputs);
- Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
- const NodeVector& inputs);
+ Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane, Node* const* inputs);
Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
- const NodeVector& inputs);
+ Node* const* inputs);
- Node* SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
- const NodeVector& inputs);
+ Node* Simd8x16ShuffleOp(const uint8_t shuffle[16], Node* const* inputs);
bool has_simd() const { return has_simd_; }
@@ -267,7 +276,6 @@ class WasmGraphBuilder {
private:
static const int kDefaultBufferSize = 16;
- friend class WasmTrapHelper;
Zone* zone_;
JSGraph* jsgraph_;
@@ -284,6 +292,7 @@ class WasmGraphBuilder {
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
+ bool needs_stack_check_ = false;
wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
@@ -299,9 +308,11 @@ class WasmGraphBuilder {
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
wasm::WasmCodePosition position);
-
- Node* BuildChangeEndianness(Node* node, MachineType type,
- wasm::ValueType wasmtype = wasm::kWasmStmt);
+ const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
+ Node* BuildChangeEndiannessStore(Node* node, MachineType type,
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
+ Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -409,7 +420,29 @@ class WasmGraphBuilder {
int AddParameterNodes(Node** args, int pos, int param_count,
wasm::FunctionSig* sig);
+
+ void SetNeedsStackCheck() { needs_stack_check_ = true; }
+
+ //-----------------------------------------------------------------------
+ // Operations involving the CEntryStub, a dependency we want to remove
+ // to get off the GC heap.
+ //-----------------------------------------------------------------------
+ Node* BuildCallToRuntime(Runtime::FunctionId f, Node** parameters,
+ int parameter_count);
+
+ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, Node* context,
+ Node** parameters, int parameter_count);
+
+ Node* BuildModifyThreadInWasmFlag(bool new_value);
};
+
+V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(Zone* zone,
+ wasm::FunctionSig* sig);
+V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
+ Zone* zone, CallDescriptor* descriptor);
+V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptorForSimd(
+ Zone* zone, CallDescriptor* descriptor);
+
} // namespace compiler
} // namespace internal
} // namespace v8
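
The constructor split above (foreground vs. background flavors, the latter taking a shared Counters pointer) replaces the old is_sync flag. As a hedged sketch of how a call site might choose between them (the helper itself is hypothetical; only the constructor signatures come from this header):

    #include <memory>

    // Hypothetical helper, not V8 code.
    std::unique_ptr<WasmCompilationUnit> MakeUnit(
        Isolate* isolate, wasm::ModuleBytesEnv* module_env,
        const wasm::WasmFunction* function, Handle<Code> centry_stub,
        const std::shared_ptr<Counters>& async_counters,
        bool may_run_on_background_thread) {
      if (may_run_on_background_thread) {
        // Background units report through the shared Counters object instead
        // of touching isolate-owned counters directly.
        return std::unique_ptr<WasmCompilationUnit>(new WasmCompilationUnit(
            isolate, module_env, function, centry_stub, async_counters));
      }
      return std::unique_ptr<WasmCompilationUnit>(new WasmCompilationUnit(
          isolate, module_env, function, centry_stub));
    }
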
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index c739be5399..e5130fb63a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -8,38 +8,33 @@
#include "src/objects-inl.h"
#include "src/register-configuration.h"
-#include "src/wasm/wasm-module.h"
-
#include "src/compiler/linkage.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
-// TODO(titzer): this should not be in the WASM namespace.
-namespace wasm {
+namespace compiler {
-using compiler::LocationSignature;
-using compiler::CallDescriptor;
-using compiler::LinkageLocation;
+using wasm::ValueType;
namespace {
MachineType MachineTypeFor(ValueType type) {
switch (type) {
- case kWasmI32:
+ case wasm::kWasmI32:
return MachineType::Int32();
- case kWasmI64:
+ case wasm::kWasmI64:
return MachineType::Int64();
- case kWasmF64:
+ case wasm::kWasmF64:
return MachineType::Float64();
- case kWasmF32:
+ case wasm::kWasmF32:
return MachineType::Float32();
- case kWasmS128:
+ case wasm::kWasmS128:
return MachineType::Simd128();
default:
UNREACHABLE();
- return MachineType::AnyTagged();
}
}
@@ -74,14 +69,6 @@ LinkageLocation stackloc(int i, MachineType type) {
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
-#elif V8_TARGET_ARCH_X87
-// ===========================================================================
-// == x87 ====================================================================
-// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
-#define GP_RETURN_REGISTERS eax, edx
-#define FP_RETURN_REGISTERS stX_0
-
#elif V8_TARGET_ARCH_ARM
// ===========================================================================
// == arm ====================================================================
@@ -183,7 +170,7 @@ struct Allocator {
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
- if (type == kWasmF32) {
+ if (type == wasm::kWasmF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code),
@@ -208,10 +195,11 @@ struct Allocator {
}
}
bool IsFloatingPoint(ValueType type) {
- return type == kWasmF32 || type == kWasmF64;
+ return type == wasm::kWasmF32 || type == wasm::kWasmF64;
}
int Words(ValueType type) {
- if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
+ if (kPointerSize < 8 &&
+ (type == wasm::kWasmI64 || type == wasm::kWasmF64)) {
return 2;
}
return 1;
@@ -276,8 +264,7 @@ static base::LazyInstance<Allocator, ReturnRegistersCreateTrait>::type
return_registers = LAZY_INSTANCE_INITIALIZER;
// General code uses the above configuration data.
-CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
- FunctionSig* fsig) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count());
@@ -302,7 +289,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
const RegList kCalleeSaveRegisters = 0;
const RegList kCalleeSaveFPRegisters = 0;
- // The target for WASM calls is always a code object.
+ // The target for wasm calls is always a code object.
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
@@ -380,20 +367,20 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
descriptor->debug_name());
}
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
- Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
+ CallDescriptor* descriptor) {
return ReplaceTypeInCallDescriptorWith(zone, descriptor, 2,
MachineType::Int64(),
MachineRepresentation::kWord32);
}
-CallDescriptor* ModuleEnv::GetI32WasmCallDescriptorForSimd(
- Zone* zone, CallDescriptor* descriptor) {
+CallDescriptor* GetI32WasmCallDescriptorForSimd(Zone* zone,
+ CallDescriptor* descriptor) {
return ReplaceTypeInCallDescriptorWith(zone, descriptor, 4,
MachineType::Simd128(),
MachineRepresentation::kWord32);
}
-} // namespace wasm
+} // namespace compiler
} // namespace internal
} // namespace v8
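
For readers unfamiliar with the Int64 lowering performed by GetI32WasmCallDescriptor, a worked example of the transformation on a 32-bit target (illustrative; the concrete register or stack assignment of each slot depends on the platform configuration above):

    wasm signature:        (i64, f32) -> i64
    64-bit descriptor:     params = [Int64, Float32]            returns = [Int64]
    after i32 lowering:    params = [Int32, Int32, Float32]     returns = [Int32, Int32]

GetI32WasmCallDescriptorForSimd applies the same rewrite to Simd128 slots, splitting each into four Word32 slots.
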
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 86c547f460..9e9be09ecb 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -20,7 +20,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define __ masm()->
+#define __ tasm()->
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
@@ -41,7 +41,7 @@ class X64OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kFloat64) {
- DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
+ DCHECK_EQ(0, constant.ToFloat64().AsUint64());
return Immediate(0);
}
if (RelocInfo::IsWasmReference(constant.rmode())) {
@@ -141,10 +141,8 @@ class X64OperandConverter : public InstructionOperandConverter {
}
case kMode_None:
UNREACHABLE();
- return Operand(no_reg, 0);
}
UNREACHABLE();
- return Operand(no_reg, 0);
}
Operand MemoryOperand(size_t first_input = 0) {
@@ -207,14 +205,15 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
: OutOfLineCode(gen),
result_(result),
input_(input),
- unwinding_info_writer_(unwinding_info_writer) {}
+ unwinding_info_writer_(unwinding_info_writer),
+ zone_(gen->zone()) {}
void Generate() final {
__ subp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(MemOperand(rsp, 0), input_);
- __ SlowTruncateToI(result_, rsp, 0);
+ __ SlowTruncateToIDelayed(zone_, result_, rsp, 0);
__ addp(rsp, Immediate(kDoubleSize));
unwinding_info_writer_->MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-kDoubleSize);
@@ -224,6 +223,7 @@ class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
Register const result_;
XMMRegister const input_;
UnwindingInfoWriter* const unwinding_info_writer_;
+ Zone* zone_;
};
@@ -238,7 +238,8 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
- mode_(mode) {}
+ mode_(mode),
+ zone_(gen->zone()) {}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
@@ -252,10 +253,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
__ leap(scratch1_, operand_);
- __ CallStub(&stub);
+ __ CallStubDelayed(
+ new (zone_) RecordWriteStub(nullptr, object_, scratch0_, scratch1_,
+ remembered_set_action, save_fp_mode));
}
private:
@@ -265,6 +266,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
+ Zone* zone_;
};
class WasmOutOfLineTrap final : public OutOfLineCode {
@@ -294,7 +296,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
// with AssembleArchTrap.
__ Push(Smi::FromInt(position_));
__ Move(rsi, Smi::kZero);
- __ CallRuntime(Runtime::kThrowWasmError);
+ __ CallRuntimeDelayed(gen_->zone(), Runtime::kThrowWasmError);
ReferenceMap* reference_map =
new (gen_->code()->zone()) ReferenceMap(gen_->code()->zone());
@@ -451,7 +453,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#define ASSEMBLE_AVX_BINOP(asm_instr) \
do { \
- CpuFeatureScope avx_scope(masm(), AVX); \
+ CpuFeatureScope avx_scope(tasm(), AVX); \
if (instr->InputAt(1)->IsFPRegister()) { \
__ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
i.InputDoubleRegister(1)); \
@@ -696,18 +698,18 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- __ PrepareCallCFunction(2); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ __ PrepareCallCFunction(2); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
} while (false)
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- __ PrepareCallCFunction(1); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 1); \
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ __ PrepareCallCFunction(1); \
+ __ CallCFunction( \
+ ExternalReference::ieee754_##name##_function(__ isolate()), 1); \
} while (false)
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
@@ -794,7 +796,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -812,13 +814,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
@@ -832,7 +834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ Call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -851,7 +853,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.TempRegister(2));
}
if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+ Handle<Code> code = i.InputCode(0);
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
@@ -1063,8 +1065,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
__ Movsd(xmm2, xmm0);
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
+ __ CallStubDelayed(new (zone())
+ MathPowStub(nullptr, MathPowStub::DOUBLE));
__ Movsd(xmm0, xmm3);
break;
}
@@ -1287,7 +1289,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Cvtss2sd);
break;
case kSSEFloat32Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1344,7 +1346,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// The following 2 instruction implicitly use rax.
__ fnstsw_ax();
if (CpuFeatures::IsSupported(SAHF)) {
- CpuFeatureScope sahf_scope(masm(), SAHF);
+ CpuFeatureScope sahf_scope(tasm(), SAHF);
__ sahf();
} else {
__ shrl(rax, Immediate(8));
@@ -1494,7 +1496,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
@@ -1763,7 +1765,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kAVXFloat32Cmp: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1787,7 +1789,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kAVXFloat64Cmp: {
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
if (instr->InputAt(1)->IsFPRegister()) {
__ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
@@ -1812,7 +1814,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 33);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1826,7 +1828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 31);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1840,7 +1842,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrlq(kScratchDoubleReg, kScratchDoubleReg, 1);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -1854,7 +1856,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(masm(), AVX);
+ CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsllq(kScratchDoubleReg, kScratchDoubleReg, 63);
if (instr->InputAt(0)->IsFPRegister()) {
@@ -2007,7 +2009,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movdqu: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
__ pc_offset());
if (instr->HasOutput()) {
@@ -2174,12 +2176,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kX64I32x4ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2188,6 +2190,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I32x4Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignd(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubd(dst, src);
+ }
+ break;
+ }
case kX64I32x4Shl: {
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
break;
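
The new kX64I32x4Neg case above (and the analogous I16x8/I8x16 cases later in this file) picks one of two strategies depending on whether the destination aliases the source. A scalar model of one lane, as an illustration rather than part of the patch:

    #include <cstdint>

    // dst != src path: clear dst, then subtract (pxor dst,dst; psubd dst,src).
    // Hardware lanes wrap on overflow, so the arithmetic is done unsigned.
    int32_t NegateLane(int32_t x) {
      return static_cast<int32_t>(0u - static_cast<uint32_t>(x));  // 0 - x
    }
    // dst == src path: pcmpeqd scratch,scratch makes every lane read as -1,
    // and psignd negates a dst lane whenever the matching scratch lane is
    // negative, so the all-ones mask negates every lane in place without
    // needing a zeroed temporary register.
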
@@ -2201,7 +2216,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4AddHoriz: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
__ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2210,17 +2225,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4Mul: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmulld(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2234,20 +2249,50 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I32x4GtS: {
+ __ pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I32x4GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminsd(dst, src);
+ __ pcmpeqd(dst, src);
+ break;
+ }
case kX64I32x4ShrU: {
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kX64I32x4MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I32x4MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I32x4GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pmaxud(dst, src);
+ __ pcmpeqd(dst, src);
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I32x4GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminud(dst, src);
+ __ pcmpeqd(dst, src);
+ break;
+ }
case kX64S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ xorps(dst, dst);
@@ -2262,14 +2307,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxwl(dst, dst);
break;
}
case kX64I16x8ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2278,6 +2323,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I16x8Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignw(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubw(dst, src);
+ }
+ break;
+ }
case kX64I16x8Shl: {
__ psllw(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2295,7 +2353,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8AddHoriz: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
__ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2308,17 +2366,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8Mul: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2332,6 +2390,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I16x8GtS: {
+ __ pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminsw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
case kX64I16x8ShrU: {
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
break;
@@ -2345,17 +2415,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I16x8MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I16x8MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I16x8GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pmaxuw(dst, src);
+ __ pcmpeqw(dst, src);
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I16x8GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminuw(dst, src);
+ __ pcmpeqw(dst, src);
+ break;
+ }
case kX64I8x16Splat: {
- CpuFeatureScope sse_scope(masm(), SSSE3);
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
__ xorps(kScratchDoubleReg, kScratchDoubleReg);
@@ -2363,14 +2451,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16ExtractLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
Register dst = i.OutputRegister();
__ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsxbl(dst, dst);
break;
}
case kX64I8x16ReplaceLane: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
if (instr->InputAt(2)->IsRegister()) {
__ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
i.InputInt8(1));
@@ -2379,6 +2467,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64I8x16Neg: {
+ CpuFeatureScope sse_scope(tasm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psignb(dst, kScratchDoubleReg);
+ } else {
+ __ pxor(dst, dst);
+ __ psubb(dst, src);
+ }
+ break;
+ }
case kX64I8x16Add: {
__ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2396,12 +2497,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxS: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
@@ -2415,6 +2516,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
+ case kX64I8x16GtS: {
+ __ pcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16GeS: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminsb(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
case kX64I8x16AddSaturateU: {
__ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2424,15 +2537,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16MinU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
case kX64I8x16MaxU: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I8x16GtU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pmaxub(dst, src);
+ __ pcmpeqb(dst, src);
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16GeU: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ __ pminub(dst, src);
+ __ pcmpeqb(dst, src);
+ break;
+ }
case kX64S128And: {
__ pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2447,8 +2578,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64S128Not: {
XMMRegister dst = i.OutputSimd128Register();
- __ pcmpeqd(dst, dst);
- __ pxor(dst, i.InputSimd128Register(1));
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst.is(src)) {
+ __ movaps(kScratchDoubleReg, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, src);
+ }
+
break;
}
case kX64S128Select: {
@@ -2632,7 +2771,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
break;
}
UNREACHABLE();
- return no_condition;
}
} // namespace
@@ -2690,22 +2828,20 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
__ PrepareCallCFunction(0);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
+ __ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(
+ __ isolate()),
+ 0);
__ LeaveFrame(StackFrame::WASM_COMPILED);
__ Ret();
} else {
gen_->AssembleSourcePosition(instr_);
- __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+ __ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
ReferenceMap* reference_map =
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- if (FLAG_debug_code) {
- __ ud2();
- }
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
}
}
@@ -2788,9 +2924,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
: Deoptimizer::EAGER;
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
+ __ isolate(), deoptimization_id, bailout_type);
if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- if (isolate()->NeedsSourcePositionsForProfiling()) {
+ if (info()->is_source_positions_enabled()) {
__ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
}
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
@@ -2862,11 +2998,41 @@ void CodeGenerator::AssembleConstructFrame() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- shrink_slots -= static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
+ shrink_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
}
const RegList saves_fp = descriptor->CalleeSavedFPRegisters();
if (shrink_slots > 0) {
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ __ Move(kScratchRegister,
+ ExternalReference::address_of_real_stack_limit(__ isolate()));
+ __ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ __ addq(kScratchRegister, Immediate(shrink_slots * kPointerSize));
+ __ cmpq(rsp, kScratchRegister);
+ __ j(above_equal, &done);
+ }
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ __ Move(rsi, Smi::kZero);
+ __ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ bind(&done);
+ }
__ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
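
A standalone restatement of the two decisions in the new pre-frame check (assumption: FLAG_stack_size is a kilobyte count, as the * 1024 scaling suggests; the helpers below are illustrative, not V8 code):

    // Only large wasm frames get the early check, before the frame is built.
    bool NeedsEarlyStackCheck(bool is_wasm, int shrink_slots) {
      return is_wasm && shrink_slots > 128;
    }

    // The explicit limit comparison is emitted only when the frame could fit
    // on the stack at all; a frame larger than the entire stack overflows
    // unconditionally, so the generated code falls straight through to the
    // Runtime::kThrowWasmStackOverflow call.
    bool EmitLimitComparison(int shrink_slots, int pointer_size,
                             int stack_size_kb) {
      return shrink_slots * pointer_size < stack_size_kb * 1024;
    }
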
@@ -3023,12 +3189,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
case Constant::kFloat32:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ __ MoveNumber(dst, src.ToFloat32());
break;
case Constant::kFloat64:
- __ Move(dst,
- isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+ __ MoveNumber(dst, src.ToFloat64().value());
break;
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
@@ -3062,7 +3226,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src.type());
- uint64_t src_const = bit_cast<uint64_t>(src.ToFloat64());
+ uint64_t src_const = src.ToFloat64().AsUint64();
if (destination->IsFPRegister()) {
__ Move(g.ToDoubleRegister(destination), src_const);
} else {
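
SSE provides packed signed compares (pcmpgt*) but no unsigned variants, so the new GtU/GeU cases above synthesize them from unsigned min/max plus equality. A scalar sketch of the identities being used (illustration only, not part of the patch):

    #include <algorithm>
    #include <cstdint>

    // a > b (unsigned)  <=>  !(max(a, b) == b)
    //   emitted as pmaxu*, pcmpeq*, then pxor with an all-ones register.
    bool UnsignedGreaterThan(uint32_t a, uint32_t b) {
      return !(std::max(a, b) == b);
    }

    // a >= b (unsigned)  <=>  min(a, b) == b
    //   emitted as pminu*, pcmpeq*.
    bool UnsignedGreaterOrEqual(uint32_t a, uint32_t b) {
      return std::min(a, b) == b;
    }
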
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 959a7d2d03..9c268ededf 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -147,6 +147,7 @@ namespace compiler {
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
+ V(X64I32x4Neg) \
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
@@ -157,12 +158,17 @@ namespace compiler {
V(X64I32x4MaxS) \
V(X64I32x4Eq) \
V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
V(X64I16x8Splat) \
V(X64I16x8ExtractLane) \
V(X64I16x8ReplaceLane) \
+ V(X64I16x8Neg) \
V(X64I16x8Shl) \
V(X64I16x8ShrS) \
V(X64I16x8Add) \
@@ -175,14 +181,19 @@ namespace compiler {
V(X64I16x8MaxS) \
V(X64I16x8Eq) \
V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
V(X64I16x8ShrU) \
V(X64I16x8AddSaturateU) \
V(X64I16x8SubSaturateU) \
V(X64I16x8MinU) \
V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
V(X64I8x16Splat) \
V(X64I8x16ExtractLane) \
V(X64I8x16ReplaceLane) \
+ V(X64I8x16Neg) \
V(X64I8x16Add) \
V(X64I8x16AddSaturateS) \
V(X64I8x16Sub) \
@@ -191,10 +202,14 @@ namespace compiler {
V(X64I8x16MaxS) \
V(X64I8x16Eq) \
V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
V(X64I8x16AddSaturateU) \
V(X64I8x16SubSaturateU) \
V(X64I8x16MinU) \
V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
V(X64S128And) \
V(X64S128Or) \
V(X64S128Xor) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index 0f4c37f033..c5ef1e5a7a 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -126,6 +126,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
+ case kX64I32x4Neg:
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
@@ -136,12 +137,17 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4MaxS:
case kX64I32x4Eq:
case kX64I32x4Ne:
+ case kX64I32x4GtS:
+ case kX64I32x4GeS:
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
+ case kX64I32x4GtU:
+ case kX64I32x4GeU:
case kX64I16x8Splat:
case kX64I16x8ExtractLane:
case kX64I16x8ReplaceLane:
+ case kX64I16x8Neg:
case kX64I16x8Shl:
case kX64I16x8ShrS:
case kX64I16x8Add:
@@ -154,14 +160,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I16x8MaxS:
case kX64I16x8Eq:
case kX64I16x8Ne:
+ case kX64I16x8GtS:
+ case kX64I16x8GeS:
case kX64I16x8ShrU:
case kX64I16x8AddSaturateU:
case kX64I16x8SubSaturateU:
case kX64I16x8MinU:
case kX64I16x8MaxU:
+ case kX64I16x8GtU:
+ case kX64I16x8GeU:
case kX64I8x16Splat:
case kX64I8x16ExtractLane:
case kX64I8x16ReplaceLane:
+ case kX64I8x16Neg:
case kX64I8x16Add:
case kX64I8x16AddSaturateS:
case kX64I8x16Sub:
@@ -170,10 +181,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I8x16MaxS:
case kX64I8x16Eq:
case kX64I8x16Ne:
+ case kX64I8x16GtS:
+ case kX64I8x16GeS:
case kX64I8x16AddSaturateU:
case kX64I8x16SubSaturateU:
case kX64I8x16MinU:
case kX64I8x16MaxU:
+ case kX64I8x16GtU:
+ case kX64I8x16GeU:
case kX64S128And:
case kX64S128Or:
case kX64S128Xor:
@@ -189,8 +204,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Udiv:
case kX64Udiv32:
return (instr->addressing_mode() == kMode_None)
- ? kMayNeedDeoptCheck
- : kMayNeedDeoptCheck | kIsLoadOperation | kHasSideEffect;
+ ? kMayNeedDeoptOrTrapCheck
+ : kMayNeedDeoptOrTrapCheck | kIsLoadOperation | kHasSideEffect;
case kX64Movsxbl:
case kX64Movzxbl:
@@ -239,7 +254,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
}
UNREACHABLE();
- return kNoOpcodeFlags;
}
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 3f4e2b3b1c..6ac2f428e0 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -233,9 +233,6 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
case MachineRepresentation::kSimd128: // Fall through.
opcode = kX64Movdqu;
break;
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
break;
@@ -270,15 +267,10 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kSimd128: // Fall through.
return kX64Movdqu;
break;
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
- return kArchNop;
}
UNREACHABLE();
- return kArchNop;
}
} // namespace
@@ -434,9 +426,6 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -492,9 +481,6 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
@@ -2054,6 +2040,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
// Emit either ArchTableSwitch or ArchLookupSwitch.
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count;
@@ -2061,7 +2048,8 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min() &&
+ sw.value_range <= kMaxTableSwitchValueRange) {
InstructionOperand index_operand = g.TempRegister();
if (sw.min_value) {
// The leal automatically zero extends, so result is a valid 64-bit index.
@@ -2462,12 +2450,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(16x8) \
V(8x16)
-#define SIMD_ZERO_OP_LIST(V) \
- V(S128Zero) \
- V(S1x4Zero) \
- V(S1x8Zero) \
- V(S1x16Zero)
-
#define SIMD_BINOP_LIST(V) \
V(I32x4Add) \
V(I32x4AddHoriz) \
@@ -2477,8 +2459,12 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4MaxS) \
V(I32x4Eq) \
V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
V(I32x4MinU) \
V(I32x4MaxU) \
+ V(I32x4GtU) \
+ V(I32x4GeU) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
V(I16x8AddHoriz) \
@@ -2489,10 +2475,14 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8MaxS) \
V(I16x8Eq) \
V(I16x8Ne) \
+ V(I16x8GtS) \
+ V(I16x8GeS) \
V(I16x8AddSaturateU) \
V(I16x8SubSaturateU) \
V(I16x8MinU) \
V(I16x8MaxU) \
+ V(I16x8GtU) \
+ V(I16x8GeU) \
V(I8x16Add) \
V(I8x16AddSaturateS) \
V(I8x16Sub) \
@@ -2501,15 +2491,23 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MaxS) \
V(I8x16Eq) \
V(I8x16Ne) \
+ V(I8x16GtS) \
+ V(I8x16GeS) \
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16MinU) \
V(I8x16MaxU) \
+ V(I8x16GtU) \
+ V(I8x16GeU) \
V(S128And) \
V(S128Or) \
V(S128Xor)
-#define SIMD_UNOP_LIST(V) V(S128Not)
+#define SIMD_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
+ V(I8x16Neg) \
+ V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
@@ -2519,6 +2517,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+void InstructionSelector::VisitS128Zero(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node));
+}
+
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
X64OperandGenerator g(this); \
@@ -2549,14 +2552,6 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE
-#define SIMD_VISIT_ZERO_OP(Name) \
- void InstructionSelector::Visit##Name(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64S128Zero, g.DefineAsRegister(node), g.DefineAsRegister(node)); \
- }
-SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
-#undef SIMD_VISIT_ZERO_OP
-
#define VISIT_SIMD_SHIFT(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2585,15 +2580,12 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
-#define SIMD_VISIT_SELECT_OP(format) \
- void InstructionSelector::VisitS##format##Select(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64S128Select, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
- }
-SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
-#undef SIMD_VISIT_SELECT_OP
+void InstructionSelector::VisitS128Select(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64S128Select, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
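
The VisitSwitch change above adds a cap, kMaxTableSwitchValueRange (2 << 16, i.e. 131072), so very sparse switches no longer get a jump table even when the existing space/time cost comparison would have allowed one. Restated as a standalone predicate that mirrors the hunk (not meant as a drop-in):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    bool UseTableSwitch(size_t case_count, size_t value_range,
                        int32_t min_value) {
      static const size_t kMaxTableSwitchValueRange = 2 << 16;
      size_t table_space_cost = 4 + value_range;
      size_t table_time_cost = 3;
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;
      return case_count > 4 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min() &&
             value_range <= kMaxTableSwitchValueRange;
    }
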
diff --git a/deps/v8/src/compiler/x87/OWNERS b/deps/v8/src/compiler/x87/OWNERS
deleted file mode 100644
index 61245ae8e2..0000000000
--- a/deps/v8/src/compiler/x87/OWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-weiliang.lin@intel.com
-chunyang.dai@intel.com
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
deleted file mode 100644
index 32f1019cd2..0000000000
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ /dev/null
@@ -1,2772 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/code-generator.h"
-
-#include "src/compilation-info.h"
-#include "src/compiler/code-generator-impl.h"
-#include "src/compiler/gap-resolver.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/osr.h"
-#include "src/frames.h"
-#include "src/x87/assembler-x87.h"
-#include "src/x87/frames-x87.h"
-#include "src/x87/macro-assembler-x87.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define __ masm()->
-
-
-// Adds X87 specific methods for decoding operands.
-class X87OperandConverter : public InstructionOperandConverter {
- public:
- X87OperandConverter(CodeGenerator* gen, Instruction* instr)
- : InstructionOperandConverter(gen, instr) {}
-
- Operand InputOperand(size_t index, int extra = 0) {
- return ToOperand(instr_->InputAt(index), extra);
- }
-
- Immediate InputImmediate(size_t index) {
- return ToImmediate(instr_->InputAt(index));
- }
-
- Operand OutputOperand() { return ToOperand(instr_->Output()); }
-
- Operand ToOperand(InstructionOperand* op, int extra = 0) {
- if (op->IsRegister()) {
- DCHECK(extra == 0);
- return Operand(ToRegister(op));
- }
- DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
- return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
- }
-
- Operand SlotToOperand(int slot, int extra = 0) {
- FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
- return Operand(offset.from_stack_pointer() ? esp : ebp,
- offset.offset() + extra);
- }
-
- Operand HighOperand(InstructionOperand* op) {
- DCHECK(op->IsFPStackSlot());
- return ToOperand(op, kPointerSize);
- }
-
- Immediate ToImmediate(InstructionOperand* operand) {
- Constant constant = ToConstant(operand);
- if (constant.type() == Constant::kInt32 &&
- RelocInfo::IsWasmReference(constant.rmode())) {
- return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
- constant.rmode());
- }
- switch (constant.type()) {
- case Constant::kInt32:
- return Immediate(constant.ToInt32());
- case Constant::kFloat32:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
- case Constant::kFloat64:
- return Immediate(
- isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
- case Constant::kExternalReference:
- return Immediate(constant.ToExternalReference());
- case Constant::kHeapObject:
- return Immediate(constant.ToHeapObject());
- case Constant::kInt64:
- break;
- case Constant::kRpoNumber:
- return Immediate::CodeRelativeOffset(ToLabel(operand));
- }
- UNREACHABLE();
- return Immediate(-1);
- }
-
- static size_t NextOffset(size_t* offset) {
- size_t i = *offset;
- (*offset)++;
- return i;
- }
-
- static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
- STATIC_ASSERT(0 == static_cast<int>(times_1));
- STATIC_ASSERT(1 == static_cast<int>(times_2));
- STATIC_ASSERT(2 == static_cast<int>(times_4));
- STATIC_ASSERT(3 == static_cast<int>(times_8));
- int scale = static_cast<int>(mode - one);
- DCHECK(scale >= 0 && scale < 4);
- return static_cast<ScaleFactor>(scale);
- }
-
- Operand MemoryOperand(size_t* offset) {
- AddressingMode mode = AddressingModeField::decode(instr_->opcode());
- switch (mode) {
- case kMode_MR: {
- Register base = InputRegister(NextOffset(offset));
- int32_t disp = 0;
- return Operand(base, disp);
- }
- case kMode_MRI: {
- Register base = InputRegister(NextOffset(offset));
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(base, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_MR1:
- case kMode_MR2:
- case kMode_MR4:
- case kMode_MR8: {
- Register base = InputRegister(NextOffset(offset));
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_MR1, mode);
- int32_t disp = 0;
- return Operand(base, index, scale, disp);
- }
- case kMode_MR1I:
- case kMode_MR2I:
- case kMode_MR4I:
- case kMode_MR8I: {
- Register base = InputRegister(NextOffset(offset));
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_M1:
- case kMode_M2:
- case kMode_M4:
- case kMode_M8: {
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_M1, mode);
- int32_t disp = 0;
- return Operand(index, scale, disp);
- }
- case kMode_M1I:
- case kMode_M2I:
- case kMode_M4I:
- case kMode_M8I: {
- Register index = InputRegister(NextOffset(offset));
- ScaleFactor scale = ScaleFor(kMode_M1I, mode);
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
- }
- case kMode_MI: {
- Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
- return Operand(ctant.ToInt32(), ctant.rmode());
- }
- case kMode_None:
- UNREACHABLE();
- return Operand(no_reg, 0);
- }
- UNREACHABLE();
- return Operand(no_reg, 0);
- }
-
- Operand MemoryOperand(size_t first_input = 0) {
- return MemoryOperand(&first_input);
- }
-};
-
-
-namespace {
-
-bool HasImmediateInput(Instruction* instr, size_t index) {
- return instr->InputAt(index)->IsImmediate();
-}
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xor_(result_, result_); }
-
- private:
- Register const result_;
-};
-
-class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat32NaN(CodeGenerator* gen, X87Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- DCHECK(result_.code() == 0);
- USE(result_);
- __ fstp(0);
- __ push(Immediate(0xffc00000));
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- }
-
- private:
- X87Register const result_;
-};
-
-class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat64NaN(CodeGenerator* gen, X87Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- DCHECK(result_.code() == 0);
- USE(result_);
- __ fstp(0);
- __ push(Immediate(0xfff80000));
- __ push(Immediate(0x00000000));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- }
-
- private:
- X87Register const result_;
-};
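Editorial note: the two out-of-line loaders above materialize quiet-NaN bit patterns directly on the machine stack: 0xffc00000 for float32, and 0xfff8000000000000 for float64 (the stack grows downwards, so the second push supplies the low word). A small self-contained check of those patterns, added here as an illustration rather than as part of the patch:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  uint32_t f32_bits = 0xffc00000u;            // exponent all ones, quiet bit set
  uint64_t f64_bits = 0xfff8000000000000ull;  // push 0xfff80000 (high), then 0x00000000 (low)
  float f;
  double d;
  std::memcpy(&f, &f32_bits, sizeof f);
  std::memcpy(&d, &f64_bits, sizeof d);
  assert(std::isnan(f) && std::isnan(d));     // both decode to (negative) quiet NaNs
}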
-
-class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
- public:
- OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
- X87Register input)
- : OutOfLineCode(gen), result_(result), input_(input) {}
-
- void Generate() final {
- UNIMPLEMENTED();
- USE(result_);
- USE(input_);
- }
-
- private:
- Register const result_;
- X87Register const input_;
-};
-
-
-class OutOfLineRecordWrite final : public OutOfLineCode {
- public:
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
- Register value, Register scratch0, Register scratch1,
- RecordWriteMode mode)
- : OutOfLineCode(gen),
- object_(object),
- operand_(operand),
- value_(value),
- scratch0_(scratch0),
- scratch1_(scratch1),
- mode_(mode) {}
-
- void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, zero,
- exit());
- RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- remembered_set_action, save_fp_mode);
- __ lea(scratch1_, operand_);
- __ CallStub(&stub);
- }
-
- private:
- Register const object_;
- Operand const operand_;
- Register const value_;
- Register const scratch0_;
- Register const scratch1_;
- RecordWriteMode const mode_;
-};
-
-} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto offset = i.InputRegister(0); \
- DCHECK(result.code() == 0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ fstp(0); \
- __ asm_instr(i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } while (false)
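Editorial note: taken together, the two checked-load macros implement bounds-tested loads. The index in input 0 is compared against the length in input 1 and, when it is above_equal, control goes to the out-of-line stub, which materializes 0 for integer loads or a quiet NaN for float loads instead of touching memory. The checked-store macros below use the same compare but simply skip the store when the index is out of range.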
-
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- DCHECK(i.InputDoubleRegister(2).code() == 0); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3)); \
- __ bind(&done); \
- } while (false)
-
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- if (instr->InputAt(2)->IsRegister()) { \
- __ asm_instr(i.MemoryOperand(3), i.InputRegister(2)); \
- } else { \
- __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
- } \
- __ bind(&done); \
- } while (false)
-
-#define ASSEMBLE_COMPARE(asm_instr) \
- do { \
- if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
- size_t index = 0; \
- Operand left = i.MemoryOperand(&index); \
- if (HasImmediateInput(instr, index)) { \
- __ asm_instr(left, i.InputImmediate(index)); \
- } else { \
- __ asm_instr(left, i.InputRegister(index)); \
- } \
- } else { \
- if (HasImmediateInput(instr, 1)) { \
- if (instr->InputAt(0)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
- } else { \
- __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
- } \
- } else { \
- if (instr->InputAt(1)->IsRegister()) { \
- __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
- } else { \
- __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
- } \
- } \
- } \
- } while (0)
-
-#define ASSEMBLE_IEEE754_BINOP(name) \
- do { \
- /* Saves the esp into ebx */ \
- __ push(ebx); \
- __ mov(ebx, esp); \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(4, eax); \
- __ fstp(0); \
- /* Load first operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4 + kDoubleSize)); \
- /* Put first operand into stack for function call */ \
- __ fstp_d(Operand(esp, 0 * kDoubleSize)); \
- /* Load second operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4)); \
- /* Put second operand into stack for function call */ \
- __ fstp_d(Operand(esp, 1 * kDoubleSize)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 4); \
- /* Restore the ebx */ \
- __ pop(ebx); \
- /* Return value is in st(0) on x87. */ \
- __ lea(esp, Operand(esp, 2 * kDoubleSize)); \
- } while (false)
-
-#define ASSEMBLE_IEEE754_UNOP(name) \
- do { \
- /* Saves the esp into ebx */ \
- __ push(ebx); \
- __ mov(ebx, esp); \
- /* Pass one double as argument on the stack. */ \
- __ PrepareCallCFunction(2, eax); \
- __ fstp(0); \
- /* Load operand from original stack */ \
- __ fld_d(MemOperand(ebx, 4)); \
- /* Put operand into stack for function call */ \
- __ fstp_d(Operand(esp, 0)); \
- __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
- 2); \
- /* Restore the ebx */ \
- __ pop(ebx); \
- /* Return value is in st(0) on x87. */ \
- __ lea(esp, Operand(esp, kDoubleSize)); \
- } while (false)
-
-void CodeGenerator::AssembleDeconstructFrame() {
- __ mov(esp, ebp);
- __ pop(ebp);
-}
-
-void CodeGenerator::AssemblePrepareTailCall() {
- if (frame_access_state()->has_frame()) {
- __ mov(ebp, MemOperand(ebp, 0));
- }
- frame_access_state()->SetFrameAccessToSP();
-}
-
-void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
- Register, Register,
- Register) {
-  // There are not enough temp registers left on ia32 for a call instruction,
- // so we pick some scratch registers and save/restore them manually here.
- int scratch_count = 3;
- Register scratch1 = ebx;
- Register scratch2 = ecx;
- Register scratch3 = edx;
- DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
- Label done;
-
- // Check if current frame is an arguments adaptor frame.
- __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &done, Label::kNear);
-
- __ push(scratch1);
- __ push(scratch2);
- __ push(scratch3);
-
- // Load arguments count from current arguments adaptor frame (note, it
- // does not include receiver).
- Register caller_args_count_reg = scratch1;
- __ mov(caller_args_count_reg,
- Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
-
- ParameterCount callee_args_count(args_reg);
- __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
- scratch3, ReturnAddressState::kOnStack, scratch_count);
- __ pop(scratch3);
- __ pop(scratch2);
- __ pop(scratch1);
-
- __ bind(&done);
-}
-
-namespace {
-
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
- FrameAccessState* state,
- int new_slot_above_sp,
- bool allow_shrinkage = true) {
- int current_sp_offset = state->GetSPToFPSlotCount() +
- StandardFrameConstants::kFixedSlotCountAboveFp;
- int stack_slot_delta = new_slot_above_sp - current_sp_offset;
- if (stack_slot_delta > 0) {
- masm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
- state->IncreaseSPDelta(stack_slot_delta);
- } else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
- state->IncreaseSPDelta(stack_slot_delta);
- }
-}
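Editorial worked example (the numbers are assumed purely for illustration): if GetSPToFPSlotCount() returned 5 and kFixedSlotCountAboveFp were 2, current_sp_offset would be 7; a tail call whose first unused slot is 9 then needs stack_slot_delta = 2, i.e. sub esp, 2 * kPointerSize (8 bytes on ia32). A negative delta releases slots with the matching add, but only when allow_shrinkage is true.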
-
-} // namespace
-
-void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
- int first_unused_stack_slot) {
- CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
- ZoneVector<MoveOperands*> pushes(zone());
- GetPushCompatibleMoves(instr, flags, &pushes);
-
- if (!pushes.empty() &&
- (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
- first_unused_stack_slot)) {
- X87OperandConverter g(this, instr);
- for (auto move : pushes) {
- LocationOperand destination_location(
- LocationOperand::cast(move->destination()));
- InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- destination_location.index());
- if (source.IsStackSlot()) {
- LocationOperand source_location(LocationOperand::cast(source));
- __ push(g.SlotToOperand(source_location.index()));
- } else if (source.IsRegister()) {
- LocationOperand source_location(LocationOperand::cast(source));
- __ push(source_location.GetRegister());
- } else if (source.IsImmediate()) {
- __ push(Immediate(ImmediateOperand::cast(source).inline_value()));
- } else {
-        // Pushes of non-scalar data types are not supported.
- UNIMPLEMENTED();
- }
- frame_access_state()->IncreaseSPDelta(1);
- move->Eliminate();
- }
- }
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- first_unused_stack_slot, false);
-}
-
-void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
- int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(masm(), frame_access_state(),
- first_unused_stack_slot);
-}
-
-// Assembles an instruction after register allocation, producing machine code.
-CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
- Instruction* instr) {
- X87OperandConverter i(this, instr);
- InstructionCode opcode = instr->opcode();
- ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
-
- switch (arch_opcode) {
- case kArchCallCodeObject: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- EnsureSpaceForLazyDeopt();
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ call(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
- }
- RecordCallPosition(instr);
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
- no_reg, no_reg, no_reg);
- }
- if (HasImmediateInput(instr, 0)) {
- Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
- __ jmp(code, RelocInfo::CODE_TARGET);
- } else {
- Register reg = i.InputRegister(0);
- __ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
- }
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchTailCallAddress: {
- CHECK(!HasImmediateInput(instr, 0));
- Register reg = i.InputRegister(0);
- __ jmp(reg);
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchCallJSFunction: {
- EnsureSpaceForLazyDeopt();
- Register func = i.InputRegister(0);
- if (FLAG_debug_code) {
- // Check the function's context matches the context argument.
- __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- RecordCallPosition(instr);
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchTailCallJSFunctionFromJSFunction: {
- Register func = i.InputRegister(0);
- if (FLAG_debug_code) {
- // Check the function's context matches the context argument.
- __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
- }
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, no_reg,
- no_reg, no_reg);
- __ jmp(FieldOperand(func, JSFunction::kCodeEntryOffset));
- frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
- break;
- }
- case kArchPrepareCallCFunction: {
- // Frame alignment requires using FP-relative frame addressing.
- frame_access_state()->SetFrameAccessToFP();
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
- break;
- }
- case kArchPrepareTailCall:
- AssemblePrepareTailCall();
- break;
- case kArchCallCFunction: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- int const num_parameters = MiscField::decode(instr->opcode());
- if (HasImmediateInput(instr, 0)) {
- ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
- } else {
- Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
- }
- bool double_result =
- instr->HasOutput() && instr->Output()->IsFPRegister();
- if (double_result) {
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- }
- __ fninit();
- if (double_result) {
- __ fld_d(Operand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- } else {
- __ fld1();
- }
- frame_access_state()->SetFrameAccessToDefault();
- frame_access_state()->ClearSPDelta();
- break;
- }
- case kArchJmp:
- AssembleArchJump(i.InputRpo(0));
- break;
- case kArchLookupSwitch:
- AssembleArchLookupSwitch(instr);
- break;
- case kArchTableSwitch:
- AssembleArchTableSwitch(instr);
- break;
- case kArchComment: {
- Address comment_string = i.InputExternalReference(0).address();
- __ RecordComment(reinterpret_cast<const char*>(comment_string));
- break;
- }
- case kArchDebugBreak:
- __ int3();
- break;
- case kArchNop:
- case kArchThrowTerminator:
- // don't emit code for nops.
- break;
- case kArchDeoptimize: {
- int deopt_state_id =
- BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- int double_register_param_count = 0;
- int x87_layout = 0;
- for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsFPRegister()) {
- double_register_param_count++;
- }
- }
-      // Currently we use only one X87 register. If double_register_param_count
-      // is bigger than 1, a duplicated double register has been added to the
-      // inputs of this instruction.
- if (double_register_param_count > 0) {
- x87_layout = (0 << 3) | 1;
- }
- // The layout of x87 register stack is loaded on the top of FPU register
- // stack for deoptimization.
- __ push(Immediate(x87_layout));
- __ fild_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
-
- CodeGenResult result =
- AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
- if (result != kSuccess) return result;
- break;
- }
- case kArchRet:
- AssembleReturn(instr->InputAt(0));
- break;
- case kArchFramePointer:
- __ mov(i.OutputRegister(), ebp);
- break;
- case kArchStackPointer:
- __ mov(i.OutputRegister(), esp);
- break;
- case kArchParentFramePointer:
- if (frame_access_state()->has_frame()) {
- __ mov(i.OutputRegister(), Operand(ebp, 0));
- } else {
- __ mov(i.OutputRegister(), ebp);
- }
- break;
- case kArchTruncateDoubleToI: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister());
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
- Register object = i.InputRegister(0);
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- Register value = i.InputRegister(index);
- Register scratch0 = i.TempRegister(0);
- Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
- scratch0, scratch1, mode);
- __ mov(operand, value);
- __ CheckPageFlag(object, scratch0,
- MemoryChunk::kPointersFromHereAreInterestingMask,
- not_zero, ool->entry());
- __ bind(ool->exit());
- break;
- }
- case kArchStackSlot: {
- FrameOffset offset =
- frame_access_state()->GetFrameOffset(i.InputInt32(0));
- Register base;
- if (offset.from_stack_pointer()) {
- base = esp;
- } else {
- base = ebp;
- }
- __ lea(i.OutputRegister(), Operand(base, offset.offset()));
- break;
- }
- case kIeee754Float64Acos:
- ASSEMBLE_IEEE754_UNOP(acos);
- break;
- case kIeee754Float64Acosh:
- ASSEMBLE_IEEE754_UNOP(acosh);
- break;
- case kIeee754Float64Asin:
- ASSEMBLE_IEEE754_UNOP(asin);
- break;
- case kIeee754Float64Asinh:
- ASSEMBLE_IEEE754_UNOP(asinh);
- break;
- case kIeee754Float64Atan:
- ASSEMBLE_IEEE754_UNOP(atan);
- break;
- case kIeee754Float64Atanh:
- ASSEMBLE_IEEE754_UNOP(atanh);
- break;
- case kIeee754Float64Atan2:
- ASSEMBLE_IEEE754_BINOP(atan2);
- break;
- case kIeee754Float64Cbrt:
- ASSEMBLE_IEEE754_UNOP(cbrt);
- break;
- case kIeee754Float64Cos:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(cos);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Cosh:
- ASSEMBLE_IEEE754_UNOP(cosh);
- break;
- case kIeee754Float64Expm1:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(expm1);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Exp:
- ASSEMBLE_IEEE754_UNOP(exp);
- break;
- case kIeee754Float64Log:
- ASSEMBLE_IEEE754_UNOP(log);
- break;
- case kIeee754Float64Log1p:
- ASSEMBLE_IEEE754_UNOP(log1p);
- break;
- case kIeee754Float64Log2:
- ASSEMBLE_IEEE754_UNOP(log2);
- break;
- case kIeee754Float64Log10:
- ASSEMBLE_IEEE754_UNOP(log10);
- break;
- case kIeee754Float64Pow: {
- // Keep the x87 FPU stack empty before calling stub code
- __ fstp(0);
-      // Call the MathPowStub and leave the return value in st(0)
- MathPowStub stub(isolate(), MathPowStub::DOUBLE);
- __ CallStub(&stub);
- /* Return value is in st(0) on x87. */
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kIeee754Float64Sin:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(sin);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Sinh:
- ASSEMBLE_IEEE754_UNOP(sinh);
- break;
- case kIeee754Float64Tan:
- __ X87SetFPUCW(0x027F);
- ASSEMBLE_IEEE754_UNOP(tan);
- __ X87SetFPUCW(0x037F);
- break;
- case kIeee754Float64Tanh:
- ASSEMBLE_IEEE754_UNOP(tanh);
- break;
- case kX87Add:
- if (HasImmediateInput(instr, 1)) {
- __ add(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ add(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87And:
- if (HasImmediateInput(instr, 1)) {
- __ and_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ and_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Cmp:
- ASSEMBLE_COMPARE(cmp);
- break;
- case kX87Cmp16:
- ASSEMBLE_COMPARE(cmpw);
- break;
- case kX87Cmp8:
- ASSEMBLE_COMPARE(cmpb);
- break;
- case kX87Test:
- ASSEMBLE_COMPARE(test);
- break;
- case kX87Test16:
- ASSEMBLE_COMPARE(test_w);
- break;
- case kX87Test8:
- ASSEMBLE_COMPARE(test_b);
- break;
- case kX87Imul:
- if (HasImmediateInput(instr, 1)) {
- __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
- } else {
- __ imul(i.OutputRegister(), i.InputOperand(1));
- }
- break;
- case kX87ImulHigh:
- __ imul(i.InputRegister(1));
- break;
- case kX87UmulHigh:
- __ mul(i.InputRegister(1));
- break;
- case kX87Idiv:
- __ cdq();
- __ idiv(i.InputOperand(1));
- break;
- case kX87Udiv:
- __ Move(edx, Immediate(0));
- __ div(i.InputOperand(1));
- break;
- case kX87Not:
- __ not_(i.OutputOperand());
- break;
- case kX87Neg:
- __ neg(i.OutputOperand());
- break;
- case kX87Or:
- if (HasImmediateInput(instr, 1)) {
- __ or_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ or_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Xor:
- if (HasImmediateInput(instr, 1)) {
- __ xor_(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ xor_(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Sub:
- if (HasImmediateInput(instr, 1)) {
- __ sub(i.InputOperand(0), i.InputImmediate(1));
- } else {
- __ sub(i.InputRegister(0), i.InputOperand(1));
- }
- break;
- case kX87Shl:
- if (HasImmediateInput(instr, 1)) {
- __ shl(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ shl_cl(i.OutputOperand());
- }
- break;
- case kX87Shr:
- if (HasImmediateInput(instr, 1)) {
- __ shr(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ shr_cl(i.OutputOperand());
- }
- break;
- case kX87Sar:
- if (HasImmediateInput(instr, 1)) {
- __ sar(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ sar_cl(i.OutputOperand());
- }
- break;
- case kX87AddPair: {
- // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
- // i.InputRegister(1) ... left high word.
- // i.InputRegister(2) ... right low word.
- // i.InputRegister(3) ... right high word.
- bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
- i.OutputRegister(0).code() == i.InputRegister(3).code()) {
- // We cannot write to the output register directly, because it would
- // overwrite an input for adc. We have to use the temp register.
- use_temp = true;
- __ Move(i.TempRegister(0), i.InputRegister(0));
- __ add(i.TempRegister(0), i.InputRegister(2));
- } else {
- __ add(i.OutputRegister(0), i.InputRegister(2));
- }
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
- __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
- if (use_temp) {
- __ Move(i.OutputRegister(0), i.TempRegister(0));
- }
- break;
- }
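Editorial note: functionally, the add/adc sequence above is a 64-bit addition split across two 32-bit registers; the temp register exists only so that writing the low result cannot clobber a high-word input before the adc has consumed it. A plain C++ sketch of the same arithmetic (a hypothetical helper, not a V8 API):

#include <cstdint>

void AddPairSketch(uint32_t left_lo, uint32_t left_hi,
                   uint32_t right_lo, uint32_t right_hi,
                   uint32_t* out_lo, uint32_t* out_hi) {
  uint64_t lo = static_cast<uint64_t>(left_lo) + right_lo;         // the "add"
  *out_lo = static_cast<uint32_t>(lo);
  *out_hi = left_hi + right_hi + static_cast<uint32_t>(lo >> 32);  // the "adc" folds in the carry
}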
- case kX87SubPair: {
- // i.OutputRegister(0) == i.InputRegister(0) ... left low word.
- // i.InputRegister(1) ... left high word.
- // i.InputRegister(2) ... right low word.
- // i.InputRegister(3) ... right high word.
- bool use_temp = false;
- if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
- i.OutputRegister(0).code() == i.InputRegister(3).code()) {
- // We cannot write to the output register directly, because it would
-        // overwrite an input of sbb. We have to use the temp register.
- use_temp = true;
- __ Move(i.TempRegister(0), i.InputRegister(0));
- __ sub(i.TempRegister(0), i.InputRegister(2));
- } else {
- __ sub(i.OutputRegister(0), i.InputRegister(2));
- }
- if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
- __ Move(i.OutputRegister(1), i.InputRegister(1));
- }
- __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
- if (use_temp) {
- __ Move(i.OutputRegister(0), i.TempRegister(0));
- }
- break;
- }
- case kX87MulPair: {
- __ imul(i.OutputRegister(1), i.InputOperand(0));
- __ mov(i.TempRegister(0), i.InputOperand(1));
- __ imul(i.TempRegister(0), i.InputOperand(2));
- __ add(i.OutputRegister(1), i.TempRegister(0));
- __ mov(i.OutputRegister(0), i.InputOperand(0));
- // Multiplies the low words and stores them in eax and edx.
- __ mul(i.InputRegister(2));
- __ add(i.OutputRegister(1), i.TempRegister(0));
-
- break;
- }
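Editorial note: the pair multiply keeps only the low 64 bits of the 64 x 64 product. The two imul cross products form the high word, and mul supplies the low word plus a carry-out that is folded into the high word as well. Equivalent arithmetic as a sketch (again a hypothetical helper, not a V8 API):

#include <cstdint>

void MulPairSketch(uint32_t left_lo, uint32_t left_hi,
                   uint32_t right_lo, uint32_t right_hi,
                   uint32_t* out_lo, uint32_t* out_hi) {
  uint64_t low_product = static_cast<uint64_t>(left_lo) * right_lo;  // the "mul"
  *out_lo = static_cast<uint32_t>(low_product);
  *out_hi = static_cast<uint32_t>(low_product >> 32) +               // carry-out of the low product
            left_lo * right_hi + left_hi * right_lo;                 // the two imul cross products
}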
- case kX87ShlPair:
- if (HasImmediateInput(instr, 2)) {
- __ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87ShrPair:
- if (HasImmediateInput(instr, 2)) {
- __ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87SarPair:
- if (HasImmediateInput(instr, 2)) {
- __ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
- } else {
- // Shift has been loaded into CL by the register allocator.
- __ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
- }
- break;
- case kX87Ror:
- if (HasImmediateInput(instr, 1)) {
- __ ror(i.OutputOperand(), i.InputInt5(1));
- } else {
- __ ror_cl(i.OutputOperand());
- }
- break;
- case kX87Lzcnt:
- __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
- break;
- case kX87Popcnt:
- __ Popcnt(i.OutputRegister(), i.InputOperand(0));
- break;
- case kX87LoadFloat64Constant: {
- InstructionOperand* source = instr->InputAt(0);
- InstructionOperand* destination = instr->Output();
- DCHECK(source->IsConstant());
- X87OperandConverter g(this, nullptr);
- Constant src_constant = g.ToConstant(source);
-
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ mov(MemOperand(esp, 0), Immediate(lower));
- __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- UNREACHABLE();
- }
- break;
- }
- case kX87Float32Cmp: {
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ FCmp();
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float32Add: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, kFloatSize));
- __ faddp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Sub: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fsubp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Mul: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fmulp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float32Div: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fdivp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
-
- case kX87Float32Sqrt: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fsqrt();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Abs: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fabs();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Neg: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ fchs();
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87Float32Round: {
- RoundingMode mode =
- static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
-      // Set the correct rounding mode in the x87 control register.
- __ X87SetRC((mode << 10));
-
- if (!instr->InputAt(0)->IsFPRegister()) {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.InputOperand(0));
- }
- __ frndint();
- __ X87SetRC(0x0000);
- break;
- }
- case kX87Float64Add: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ faddp();
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Sub: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fsub_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Mul: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fmul_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Div: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fdiv_d(MemOperand(esp, 0));
- // Clear stack.
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- // Restore the default value of control word.
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Mod: {
- FrameScope frame_scope(&masm_, StackFrame::MANUAL);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ mov(eax, esp);
- __ PrepareCallCFunction(4, eax);
- __ fstp(0);
- __ fld_d(MemOperand(eax, 0));
- __ fstp_d(Operand(esp, 1 * kDoubleSize));
- __ fld_d(MemOperand(eax, kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
- 4);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float32Max: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(below, &done_compare, Label::kNear);
- __ j(above, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kFloatSize));
- __ fld(1);
- __ fstp_s(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 0));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kFloatSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
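Editorial note on the sign-bit test in the four Min/Max cases: NaN inputs are routed to the out-of-line NaN loader through the parity check, and strict orderings are settled by the above/below branches, so the fall-through path only triggers when the operands compare equal. The interesting equal case is -0 versus +0, which the FPU compare cannot distinguish; inspecting bit 31 of one operand lets Max keep +0 and lets the Min cases below keep -0.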
- case kX87Float64Max: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(below, &done_compare, Label::kNear);
- __ j(above, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fld(1);
- __ fstp_d(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 4));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float32Min: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(MemOperand(esp, kFloatSize));
- __ fld_s(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(above, &done_compare, Label::kNear);
- __ j(below, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kFloatSize));
- __ fld(0);
- __ fstp_s(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 0));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kFloatSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kFloatSize));
- break;
- }
- case kX87Float64Min: {
- Label compare_swap, done_compare;
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ fld(1);
- __ fld(1);
- __ FCmp();
-
- auto ool =
- new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
- __ j(parity_even, ool->entry());
- __ j(above, &done_compare, Label::kNear);
- __ j(below, &compare_swap, Label::kNear);
- __ push(eax);
- __ lea(esp, Operand(esp, -kDoubleSize));
- __ fld(0);
- __ fstp_d(Operand(esp, 0));
- __ mov(eax, MemOperand(esp, 4));
- __ and_(eax, Immediate(0x80000000));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ pop(eax);
- __ j(zero, &done_compare, Label::kNear);
-
- __ bind(&compare_swap);
- __ bind(ool->exit());
- __ fxch(1);
-
- __ bind(&done_compare);
- __ fstp(0);
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float64Abs: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fabs();
- __ lea(esp, Operand(esp, kDoubleSize));
- break;
- }
- case kX87Float64Neg: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fchs();
- __ lea(esp, Operand(esp, kDoubleSize));
- break;
- }
- case kX87Int32ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (input->IsRegister()) {
- Register input_reg = i.InputRegister(0);
- __ push(input_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- } else {
- __ fild_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Uint32ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- Label msb_set_src;
- Label jmp_return;
-      // Put the input integer into eax (temporarily).
- __ push(eax);
- if (input->IsRegister())
- __ mov(eax, i.InputRegister(0));
- else
- __ mov(eax, i.InputOperand(0));
-
- __ test(eax, eax);
- __ j(sign, &msb_set_src, Label::kNear);
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
-
- __ jmp(&jmp_return, Label::kNear);
- __ bind(&msb_set_src);
- // Need another temp reg
- __ push(ebx);
- __ mov(ebx, eax);
- __ shr(eax, 1);
- // Recover the least significant bit to avoid rounding errors.
- __ and_(ebx, Immediate(1));
- __ or_(eax, ebx);
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- __ fld(0);
- __ faddp();
- // Restore the ebx
- __ pop(ebx);
- __ bind(&jmp_return);
- // Restore the eax
- __ pop(eax);
- break;
- }
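Editorial note: the branchy sequence above works around fild_s being a signed conversion. When the most significant bit is set, the value is halved, the dropped low bit is folded back in as a sticky bit so rounding stays correct, and the converted result is doubled. A minimal sketch of the same idea (assuming ordinary round-to-nearest float arithmetic, not part of the patch):

#include <cstdint>

float Uint32ToFloat32Sketch(uint32_t value) {
  if (static_cast<int32_t>(value) >= 0) {
    return static_cast<float>(static_cast<int32_t>(value));  // fits the signed fild_s path
  }
  uint32_t half = (value >> 1) | (value & 1);                // halve, keep the sticky low bit
  float converted = static_cast<float>(static_cast<int32_t>(half));
  return converted + converted;                              // double it back (fld(0); faddp)
}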
- case kX87Int32ToFloat64: {
- InstructionOperand* input = instr->InputAt(0);
- DCHECK(input->IsRegister() || input->IsStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (input->IsRegister()) {
- Register input_reg = i.InputRegister(0);
- __ push(input_reg);
- __ fild_s(Operand(esp, 0));
- __ pop(input_reg);
- } else {
- __ fild_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Float32ToFloat64: {
- InstructionOperand* input = instr->InputAt(0);
- if (input->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Uint32ToFloat64: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ LoadUint32NoSSE2(i.InputRegister(0));
- break;
- }
- case kX87Float32ToInt32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_s(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float32ToUint32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_s(i.InputOperand(0));
- }
- Label success;
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ test(i.OutputRegister(0), i.OutputRegister(0));
- __ j(positive, &success);
-      // Need to preserve the input float32 data.
- __ fld(0);
- __ push(Immediate(INT32_MIN));
- __ fild_s(Operand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
- __ faddp();
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ or_(i.OutputRegister(0), Immediate(0x80000000));
-      // Keep only the input float32 data on the x87 stack on return.
- __ fstp(0);
- __ bind(&success);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float64ToInt32: {
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ TruncateX87TOSToI(i.OutputRegister(0));
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
- case kX87Float64ToFloat32: {
- InstructionOperand* input = instr->InputAt(0);
- if (input->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.InputOperand(0));
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_s(MemOperand(esp, 0));
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- }
- break;
- }
- case kX87Float64ToUint32: {
- __ push_imm32(-2147483648);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fld_d(i.InputOperand(0));
- }
- __ fild_s(Operand(esp, 0));
- __ fld(1);
- __ faddp();
- __ TruncateX87TOSToI(i.OutputRegister(0));
- __ add(esp, Immediate(kInt32Size));
- __ add(i.OutputRegister(), Immediate(0x80000000));
- __ fstp(0);
- if (!instr->InputAt(0)->IsFPRegister()) {
- __ fstp(0);
- }
- break;
- }
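Editorial note: the push of INT32_MIN implements a bias trick. Adding -2^31 moves a uint32-range value into the signed range that TruncateX87TOSToI can handle, and adding 0x80000000 to the truncated integer undoes the bias modulo 2^32. The same idea in plain C++ (a sketch that assumes an in-range, integral input):

#include <cstdint>

uint32_t Float64ToUint32Sketch(double input) {
  double biased = input + static_cast<double>(INT32_MIN);  // input - 2^31
  int32_t truncated = static_cast<int32_t>(biased);        // now representable as int32
  return static_cast<uint32_t>(truncated) + 0x80000000u;   // undo the bias (mod 2^32)
}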
- case kX87Float64ExtractHighWord32: {
- if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ mov(i.OutputRegister(), MemOperand(esp, kDoubleSize / 2));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
- }
- break;
- }
- case kX87Float64ExtractLowWord32: {
- if (instr->InputAt(0)->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(MemOperand(esp, 0));
- __ mov(i.OutputRegister(), MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- __ mov(i.OutputRegister(), i.InputOperand(0));
- }
- break;
- }
- case kX87Float64InsertHighWord32: {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
- __ mov(MemOperand(esp, kDoubleSize / 2), i.InputRegister(1));
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- break;
- }
- case kX87Float64InsertLowWord32: {
- __ sub(esp, Immediate(kDoubleSize));
- __ fstp_d(MemOperand(esp, 0));
- __ mov(MemOperand(esp, 0), i.InputRegister(1));
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- break;
- }
- case kX87Float64Sqrt: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ X87SetFPUCW(0x027F);
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ fsqrt();
- __ lea(esp, Operand(esp, kDoubleSize));
- __ X87SetFPUCW(0x037F);
- break;
- }
- case kX87Float64Round: {
- RoundingMode mode =
- static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
-      // Set the correct rounding mode in the x87 control register.
- __ X87SetRC((mode << 10));
-
- if (!instr->InputAt(0)->IsFPRegister()) {
- InstructionOperand* input = instr->InputAt(0);
- USE(input);
- DCHECK(input->IsFPStackSlot());
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.InputOperand(0));
- }
- __ frndint();
- __ X87SetRC(0x0000);
- break;
- }
- case kX87Float64Cmp: {
- __ fld_d(MemOperand(esp, kDoubleSize));
- __ fld_d(MemOperand(esp, 0));
- __ FCmp();
- __ lea(esp, Operand(esp, 2 * kDoubleSize));
- break;
- }
- case kX87Float64SilenceNaN: {
- Label end, return_qnan;
- __ fstp(0);
- __ push(ebx);
-      // Load the upper half word of the hole NaN (an sNaN) into ebx.
- __ mov(ebx, MemOperand(esp, 2 * kInt32Size));
- __ cmp(ebx, Immediate(kHoleNanUpper32));
-      // Check whether the input is the hole NaN (sNaN).
- __ j(equal, &return_qnan, Label::kNear);
-      // If the input isn't the hole NaN (sNaN), just load it and return.
- __ fld_d(MemOperand(esp, 1 * kInt32Size));
- __ jmp(&end);
- __ bind(&return_qnan);
-      // If the input is the hole NaN (sNaN), return a qNaN.
- __ push(Immediate(0xffffffff));
- __ push(Immediate(0xfff7ffff));
- __ fld_d(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kDoubleSize));
- __ bind(&end);
- __ pop(ebx);
- // Clear stack.
- __ lea(esp, Operand(esp, 1 * kDoubleSize));
- break;
- }
- case kX87Movsxbl:
- __ movsx_b(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movzxbl:
- __ movzx_b(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov_b(operand, i.InputInt8(index));
- } else {
- __ mov_b(operand, i.InputRegister(index));
- }
- break;
- }
- case kX87Movsxwl:
- __ movsx_w(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movzxwl:
- __ movzx_w(i.OutputRegister(), i.MemoryOperand());
- break;
- case kX87Movw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov_w(operand, i.InputInt16(index));
- } else {
- __ mov_w(operand, i.InputRegister(index));
- }
- break;
- }
- case kX87Movl:
- if (instr->HasOutput()) {
- __ mov(i.OutputRegister(), i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- if (HasImmediateInput(instr, index)) {
- __ mov(operand, i.InputImmediate(index));
- } else {
- __ mov(operand, i.InputRegister(index));
- }
- }
- break;
- case kX87Movsd: {
- if (instr->HasOutput()) {
- X87Register output = i.OutputDoubleRegister();
- USE(output);
- DCHECK(output.code() == 0);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_d(i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ fst_d(operand);
- }
- break;
- }
- case kX87Movss: {
- if (instr->HasOutput()) {
- X87Register output = i.OutputDoubleRegister();
- USE(output);
- DCHECK(output.code() == 0);
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- __ fld_s(i.MemoryOperand());
- } else {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ fst_s(operand);
- }
- break;
- }
- case kX87BitcastFI: {
- __ mov(i.OutputRegister(), MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- break;
- }
- case kX87BitcastIF: {
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- __ fstp(0);
- if (instr->InputAt(0)->IsRegister()) {
- __ lea(esp, Operand(esp, -kFloatSize));
- __ mov(MemOperand(esp, 0), i.InputRegister(0));
- __ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
- } else {
- __ fld_s(i.InputOperand(0));
- }
- break;
- }
- case kX87Lea: {
- AddressingMode mode = AddressingModeField::decode(instr->opcode());
- // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
-      // and addressing mode just happen to work out. The "addl"/"subl" forms
- // in these cases are faster based on measurements.
- if (mode == kMode_MI) {
- __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
- } else if (i.InputRegister(0).is(i.OutputRegister())) {
- if (mode == kMode_MRI) {
- int32_t constant_summand = i.InputInt32(1);
- if (constant_summand > 0) {
- __ add(i.OutputRegister(), Immediate(constant_summand));
- } else if (constant_summand < 0) {
- __ sub(i.OutputRegister(), Immediate(-constant_summand));
- }
- } else if (mode == kMode_MR1) {
- if (i.InputRegister(1).is(i.OutputRegister())) {
- __ shl(i.OutputRegister(), 1);
- } else {
- __ add(i.OutputRegister(), i.InputRegister(1));
- }
- } else if (mode == kMode_M2) {
- __ shl(i.OutputRegister(), 1);
- } else if (mode == kMode_M4) {
- __ shl(i.OutputRegister(), 2);
- } else if (mode == kMode_M8) {
- __ shl(i.OutputRegister(), 3);
- } else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
- }
- } else if (mode == kMode_MR1 &&
- i.InputRegister(1).is(i.OutputRegister())) {
- __ add(i.OutputRegister(), i.InputRegister(0));
- } else {
- __ lea(i.OutputRegister(), i.MemoryOperand());
- }
- break;
- }
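Editorial example of the strength reduction described in the comment above: with mode kMode_MRI and the base already in the output register, a positive constant summand becomes a plain add and a negative one becomes a sub; with kMode_M4 and the index in the output register, the generic lea turns into shl by 2. Only when no such pattern applies does the case fall back to the full lea with the decoded memory operand.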
- case kX87Push:
- if (instr->InputAt(0)->IsFPRegister()) {
- auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kFloatSize));
- __ fst_s(Operand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
- } else {
- DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
- __ sub(esp, Immediate(kDoubleSize));
- __ fst_d(Operand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- }
- } else if (instr->InputAt(0)->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*instr->InputAt(0));
- if (allocated.representation() == MachineRepresentation::kFloat32) {
- __ sub(esp, Immediate(kFloatSize));
- __ fld_s(i.InputOperand(0));
- __ fstp_s(MemOperand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
- } else {
- DCHECK(allocated.representation() == MachineRepresentation::kFloat64);
- __ sub(esp, Immediate(kDoubleSize));
- __ fld_d(i.InputOperand(0));
- __ fstp_d(MemOperand(esp, 0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- }
- } else if (HasImmediateInput(instr, 0)) {
- __ push(i.InputImmediate(0));
- frame_access_state()->IncreaseSPDelta(1);
- } else {
- __ push(i.InputOperand(0));
- frame_access_state()->IncreaseSPDelta(1);
- }
- break;
- case kX87Poke: {
- int const slot = MiscField::decode(instr->opcode());
- if (HasImmediateInput(instr, 0)) {
- __ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
- } else {
- __ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
- }
- break;
- }
- case kX87Xchgb: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_b(i.InputRegister(index), operand);
- break;
- }
- case kX87Xchgw: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg_w(i.InputRegister(index), operand);
- break;
- }
- case kX87Xchgl: {
- size_t index = 0;
- Operand operand = i.MemoryOperand(&index);
- __ xchg(i.InputRegister(index), operand);
- break;
- }
- case kX87PushFloat32:
- __ lea(esp, Operand(esp, -kFloatSize));
- if (instr->InputAt(0)->IsFPStackSlot()) {
- __ fld_s(i.InputOperand(0));
- __ fstp_s(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ fst_s(MemOperand(esp, 0));
- } else {
- UNREACHABLE();
- }
- break;
- case kX87PushFloat64:
- __ lea(esp, Operand(esp, -kDoubleSize));
- if (instr->InputAt(0)->IsFPStackSlot()) {
- __ fld_d(i.InputOperand(0));
- __ fstp_d(MemOperand(esp, 0));
- } else if (instr->InputAt(0)->IsFPRegister()) {
- __ fst_d(MemOperand(esp, 0));
- } else {
- UNREACHABLE();
- }
- break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_s, OutOfLineLoadFloat32NaN);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(fld_d, OutOfLineLoadFloat64NaN);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(fst_s);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(fst_d);
- break;
- case kX87StackCheck: {
- ExternalReference const stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- break;
- }
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
- case kAtomicLoadInt8:
- case kAtomicLoadUint8:
- case kAtomicLoadInt16:
- case kAtomicLoadUint16:
- case kAtomicLoadWord32:
- case kAtomicStoreWord8:
- case kAtomicStoreWord16:
- case kAtomicStoreWord32:
- UNREACHABLE(); // Won't be generated by instruction selector.
- break;
- }
- return kSuccess;
-} // NOLINT(readability/fn_size)
-
-static Condition FlagsConditionToCondition(FlagsCondition condition) {
- switch (condition) {
- case kUnorderedEqual:
- case kEqual:
- return equal;
- break;
- case kUnorderedNotEqual:
- case kNotEqual:
- return not_equal;
- break;
- case kSignedLessThan:
- return less;
- break;
- case kSignedGreaterThanOrEqual:
- return greater_equal;
- break;
- case kSignedLessThanOrEqual:
- return less_equal;
- break;
- case kSignedGreaterThan:
- return greater;
- break;
- case kUnsignedLessThan:
- return below;
- break;
- case kUnsignedGreaterThanOrEqual:
- return above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- return below_equal;
- break;
- case kUnsignedGreaterThan:
- return above;
- break;
- case kOverflow:
- return overflow;
- break;
- case kNotOverflow:
- return no_overflow;
- break;
- default:
- UNREACHABLE();
- return no_condition;
- break;
- }
-}
-
-// Assembles a branch after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
-
- Label done;
- Label tlabel_tmp;
- Label flabel_tmp;
- Label* tlabel = &tlabel_tmp;
- Label* flabel = &flabel_tmp;
-
- Label* tlabel_dst = branch->true_label;
- Label* flabel_dst = branch->false_label;
-
- if (branch->condition == kUnorderedEqual) {
- __ j(parity_even, flabel, flabel_distance);
- } else if (branch->condition == kUnorderedNotEqual) {
- __ j(parity_even, tlabel);
- }
- __ j(FlagsConditionToCondition(branch->condition), tlabel);
-
- // Add a jump if not falling through to the next block.
- if (!branch->fallthru) __ jmp(flabel);
-
- __ jmp(&done);
- __ bind(&tlabel_tmp);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_deoptimize) {
- int double_register_param_count = 0;
- int x87_layout = 0;
- for (size_t i = 0; i < instr->InputCount(); i++) {
- if (instr->InputAt(i)->IsFPRegister()) {
- double_register_param_count++;
- }
- }
-    // Currently we use only one X87 register. If double_register_param_count
-    // is bigger than 1, a duplicated double register has been added to the
-    // inputs of this instruction.
- if (double_register_param_count > 0) {
- x87_layout = (0 << 3) | 1;
- }
- // The layout of x87 register stack is loaded on the top of FPU register
- // stack for deoptimization.
- __ push(Immediate(x87_layout));
- __ fild_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kPointerSize));
- }
- __ jmp(tlabel_dst);
- __ bind(&flabel_tmp);
- __ jmp(flabel_dst);
- __ bind(&done);
-}
-
-
-void CodeGenerator::AssembleArchJump(RpoNumber target) {
- if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
-}
-
-void CodeGenerator::AssembleArchTrap(Instruction* instr,
- FlagsCondition condition) {
- class OutOfLineTrap final : public OutOfLineCode {
- public:
- OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
- : OutOfLineCode(gen),
- frame_elided_(frame_elided),
- instr_(instr),
- gen_(gen) {}
-
- void Generate() final {
- X87OperandConverter i(gen_, instr_);
-
- Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
- i.InputInt32(instr_->InputCount() - 1));
- bool old_has_frame = __ has_frame();
- if (frame_elided_) {
- __ set_has_frame(true);
- __ EnterFrame(StackFrame::WASM_COMPILED);
- }
- GenerateCallToTrap(trap_id);
- if (frame_elided_) {
- ReferenceMap* reference_map =
- new (gen_->zone()) ReferenceMap(gen_->zone());
- gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
- Safepoint::kNoLazyDeopt);
- __ set_has_frame(old_has_frame);
- }
- if (FLAG_debug_code) {
- __ ud2();
- }
- }
-
- private:
- void GenerateCallToTrap(Runtime::FunctionId trap_id) {
- if (trap_id == Runtime::kNumFunctions) {
- // We cannot test calls to the runtime in cctest/test-run-wasm.
- // Therefore we emit a call to C here instead of a call to the runtime.
- __ PrepareCallCFunction(0, esi);
- __ CallCFunction(
- ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
- 0);
- } else {
- __ Move(esi, isolate()->native_context());
- gen_->AssembleSourcePosition(instr_);
- __ CallRuntime(trap_id);
- }
- }
-
- bool frame_elided_;
- Instruction* instr_;
- CodeGenerator* gen_;
- };
- bool frame_elided = !frame_access_state()->has_frame();
- auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
- Label* tlabel = ool->entry();
- Label end;
- if (condition == kUnorderedEqual) {
- __ j(parity_even, &end);
- } else if (condition == kUnorderedNotEqual) {
- __ j(parity_even, tlabel);
- }
- __ j(FlagsConditionToCondition(condition), tlabel);
- __ bind(&end);
-}
-
-// Assembles boolean materializations after an instruction.
-void CodeGenerator::AssembleArchBoolean(Instruction* instr,
- FlagsCondition condition) {
- X87OperandConverter i(this, instr);
- Label done;
-
- // Materialize a full 32-bit 1 or 0 value. The result register is always the
- // last output of the instruction.
- Label check;
- DCHECK_NE(0u, instr->OutputCount());
- Register reg = i.OutputRegister(instr->OutputCount() - 1);
- if (condition == kUnorderedEqual) {
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- } else if (condition == kUnorderedNotEqual) {
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- }
- Condition cc = FlagsConditionToCondition(condition);
-
- __ bind(&check);
- if (reg.is_byte_register()) {
- // setcc for byte registers (al, bl, cl, dl).
- __ setcc(cc, reg);
- __ movzx_b(reg, reg);
- } else {
- // Emit a branch to set a register to either 1 or 0.
- Label set;
- __ j(cc, &set, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- __ bind(&set);
- __ mov(reg, Immediate(1));
- }
- __ bind(&done);
-}
-
-
-void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
- X87OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmp(input, Immediate(i.InputInt32(index + 0)));
- __ j(equal, GetLabel(i.InputRpo(index + 1)));
- }
- AssembleArchJump(i.InputRpo(1));
-}
-
-
-void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
- X87OperandConverter i(this, instr);
- Register input = i.InputRegister(0);
- size_t const case_count = instr->InputCount() - 2;
- Label** cases = zone()->NewArray<Label*>(case_count);
- for (size_t index = 0; index < case_count; ++index) {
- cases[index] = GetLabel(i.InputRpo(index + 2));
- }
- Label* const table = AddJumpTable(cases, case_count);
- __ cmp(input, Immediate(case_count));
- __ j(above_equal, GetLabel(i.InputRpo(1)));
- __ jmp(Operand::JumpTable(input, times_4, table));
-}
-
-CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
- int deoptimization_id, SourcePosition pos) {
- DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
- DeoptimizeReason deoptimization_reason =
- GetDeoptimizationReason(deoptimization_id);
- Deoptimizer::BailoutType bailout_type =
- deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
- : Deoptimizer::EAGER;
- Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, bailout_type);
- if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
- __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
- __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
- return kSuccess;
-}
-
-
-// The calling convention for JSFunctions on X87 passes arguments on the
-// stack and the JSFunction and context in EDI and ESI, respectively; thus
-// the steps of the call look as follows:
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ push arguments and setup ESI, EDI }--------------------------------------
-// | args + receiver | caller frame |
-// ^ esp ^ ebp
-// [edi = JSFunction, esi = context]
-
-// --{ call [edi + kCodeEntryOffset] }------------------------------------------
-// | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// =={ prologue of called function }============================================
-// --{ push ebp }---------------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov ebp, esp }-----------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ ebp,esp
-
-// --{ push esi }---------------------------------------------------------------
-// | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// --{ push edi }---------------------------------------------------------------
-// | FNC | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// --{ subi esp, #N }-----------------------------------------------------------
-// | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
-// ^esp ^ ebp
-
-// =={ body of called function }================================================
-
-// =={ epilogue of called function }============================================
-// --{ mov esp, ebp }-----------------------------------------------------------
-// | FP | RET | args + receiver | caller frame |
-// ^ esp,ebp
-
-// --{ pop ebp }-----------------------------------------------------------
-// | | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// --{ ret #A+1 }-----------------------------------------------------------
-// | | caller frame |
-// ^ esp ^ ebp
-
-
-// Runtime function calls are accomplished by doing a stub call to the
-// CEntryStub (a real code object). On X87, the call passes arguments on the
-// stack, the number of arguments in EAX, the address of the runtime function
-// in EBX, and the context in ESI.
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
-// | args + receiver | caller frame |
-// ^ esp ^ ebp
-// [eax = #args, ebx = runtime function, esi = context]
-
-// --{ call #CEntryStub }-------------------------------------------------------
-// | RET | args + receiver | caller frame |
-// ^ esp ^ ebp
-
-// =={ body of runtime function }===============================================
-
-// --{ runtime returns }--------------------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// Other custom linkages (e.g. for calling directly into and out of C++) may
-// need to save callee-saved registers on the stack, which is done in the
-// function prologue of generated code.
-
-// --{ before the call instruction }--------------------------------------------
-// | caller frame |
-// ^ esp ^ ebp
-
-// --{ set up arguments in registers and on the stack }-------------------------
-// | args | caller frame |
-// ^ esp ^ ebp
-// [r0 = arg0, r1 = arg1, ...]
-
-// --{ call code }--------------------------------------------------------------
-// | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// =={ prologue of called function }============================================
-// --{ push ebp }---------------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov ebp, esp }-----------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ ebp,esp
-
-// --{ save registers }---------------------------------------------------------
-// | regs | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ subi esp, #N }-----------------------------------------------------------
-// | callee frame | regs | FP | RET | args | caller frame |
-// ^esp ^ ebp
-
-// =={ body of called function }================================================
-
-// =={ epilogue of called function }============================================
-// --{ restore registers }------------------------------------------------------
-// | regs | FP | RET | args | caller frame |
-// ^ esp ^ ebp
-
-// --{ mov esp, ebp }-----------------------------------------------------------
-// | FP | RET | args | caller frame |
-// ^ esp,ebp
-
-// --{ pop ebp }----------------------------------------------------------------
-// | RET | args | caller frame |
-// ^ esp ^ ebp
-
-void CodeGenerator::FinishFrame(Frame* frame) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (saves != 0) { // Save callee-saved registers.
- DCHECK(!info()->is_osr());
- int pushed = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- ++pushed;
- }
- frame->AllocateSavedCalleeRegisterSlots(pushed);
- }
-
- // Initialize FPU state.
- __ fninit();
- __ fld1();
-}
-
-void CodeGenerator::AssembleConstructFrame() {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (frame_access_state()->has_frame()) {
- if (descriptor->IsCFunctionCall()) {
- __ push(ebp);
- __ mov(ebp, esp);
- } else if (descriptor->IsJSFunctionCall()) {
- __ Prologue(this->info()->GeneratePreagedPrologue());
- if (descriptor->PushArgumentCount()) {
- __ push(kJavaScriptCallArgCountRegister);
- }
- } else {
- __ StubPrologue(info()->GetOutputStackFrameType());
- }
- }
-
- int shrink_slots =
- frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
-
- if (info()->is_osr()) {
- // TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
-
- // Unoptimized code jumps directly to this entrypoint while the unoptimized
- // frame is still on the stack. Optimized code uses OSR values directly from
- // the unoptimized frame. Thus, all that needs to be done is to allocate the
- // remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
- osr_pc_offset_ = __ pc_offset();
- shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
-
- // Initialize FPU state.
- __ fninit();
- __ fld1();
- }
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- if (shrink_slots > 0) {
- __ sub(esp, Immediate(shrink_slots * kPointerSize));
- }
-
- if (saves != 0) { // Save callee-saved registers.
- DCHECK(!info()->is_osr());
- int pushed = 0;
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (!((1 << i) & saves)) continue;
- __ push(Register::from_code(i));
- ++pushed;
- }
- }
-}
-
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
- CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
-
- // Clear the FPU stack only if there is no return value left on it.
- if (FLAG_debug_code && FLAG_enable_slow_asserts) {
- __ VerifyX87StackDepth(1);
- }
- bool clear_stack = true;
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- MachineRepresentation rep = descriptor->GetReturnType(i).representation();
- LinkageLocation loc = descriptor->GetReturnLocation(i);
- if (IsFloatingPoint(rep) && loc == LinkageLocation::ForRegister(0)) {
- clear_stack = false;
- break;
- }
- }
- if (clear_stack) __ fstp(0);
-
- const RegList saves = descriptor->CalleeSavedRegisters();
- // Restore registers.
- if (saves != 0) {
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (!((1 << i) & saves)) continue;
- __ pop(Register::from_code(i));
- }
- }
-
- // Might need ecx for scratch if pop_size is too big or if there is a variable
- // pop count.
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- size_t pop_size = descriptor->StackParameterCount() * kPointerSize;
- X87OperandConverter g(this, nullptr);
- if (descriptor->IsCFunctionCall()) {
- AssembleDeconstructFrame();
- } else if (frame_access_state()->has_frame()) {
- // Canonicalize JSFunction return sites for now if they always have the same
- // number of return args.
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
- if (return_label_.is_bound()) {
- __ jmp(&return_label_);
- return;
- } else {
- __ bind(&return_label_);
- AssembleDeconstructFrame();
- }
- } else {
- AssembleDeconstructFrame();
- }
- }
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & edx.bit());
- DCHECK_EQ(0u, descriptor->CalleeSavedRegisters() & ecx.bit());
- if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
- pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
- __ Ret(static_cast<int>(pop_size), ecx);
- } else {
- Register pop_reg = g.ToRegister(pop);
- Register scratch_reg = pop_reg.is(ecx) ? edx : ecx;
- __ pop(scratch_reg);
- __ lea(esp, Operand(esp, pop_reg, times_4, static_cast<int>(pop_size)));
- __ jmp(scratch_reg);
- }
-}
-
-void CodeGenerator::FinishCode() {}
-
-void CodeGenerator::AssembleMove(InstructionOperand* source,
- InstructionOperand* destination) {
- X87OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Register src = g.ToRegister(source);
- Operand dst = g.ToOperand(destination);
- __ mov(dst, src);
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsRegister() || destination->IsStackSlot());
- Operand src = g.ToOperand(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mov(dst, src);
- } else {
- Operand dst = g.ToOperand(destination);
- __ push(src);
- __ pop(dst);
- }
- } else if (source->IsConstant()) {
- Constant src_constant = g.ToConstant(source);
- if (src_constant.type() == Constant::kHeapObject) {
- Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ LoadHeapObject(dst, src);
- } else {
- DCHECK(destination->IsStackSlot());
- Operand dst = g.ToOperand(destination);
- AllowDeferredHandleDereference embedding_raw_address;
- if (isolate()->heap()->InNewSpace(*src)) {
- __ PushHeapObject(src);
- __ pop(dst);
- } else {
- __ mov(dst, src);
- }
- }
- } else if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (destination->IsStackSlot()) {
- Operand dst = g.ToOperand(destination);
- __ Move(dst, g.ToImmediate(source));
- } else if (src_constant.type() == Constant::kFloat32) {
- // TODO(turbofan): Can we do better here?
- uint32_t src = src_constant.ToFloat32AsInt();
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kInt32Size));
- __ mov(MemOperand(esp, 0), Immediate(src));
- // Always keep only one value on the x87 stack.
- __ fstp(0);
- __ fld_s(MemOperand(esp, 0));
- __ add(esp, Immediate(kInt32Size));
- } else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- __ Move(dst, Immediate(src));
- }
- } else {
- DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = src_constant.ToFloat64AsInt();
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (destination->IsFPRegister()) {
- __ sub(esp, Immediate(kDoubleSize));
- __ mov(MemOperand(esp, 0), Immediate(lower));
- __ mov(MemOperand(esp, kInt32Size), Immediate(upper));
- // Always keep only one value on the x87 stack.
- __ fstp(0);
- __ fld_d(MemOperand(esp, 0));
- __ add(esp, Immediate(kDoubleSize));
- } else {
- DCHECK(destination->IsFPStackSlot());
- Operand dst0 = g.ToOperand(destination);
- Operand dst1 = g.HighOperand(destination);
- __ Move(dst0, Immediate(lower));
- __ Move(dst1, Immediate(upper));
- }
- }
- } else if (source->IsFPRegister()) {
- DCHECK(destination->IsFPStackSlot());
- Operand dst = g.ToOperand(destination);
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fst_s(dst);
- break;
- case MachineRepresentation::kFloat64:
- __ fst_d(dst);
- break;
- default:
- UNREACHABLE();
- }
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
- Operand src = g.ToOperand(source);
- auto allocated = AllocatedOperand::cast(*source);
- if (destination->IsFPRegister()) {
- // Always keep only one value on the x87 stack.
- __ fstp(0);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(src);
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(src);
- break;
- default:
- UNREACHABLE();
- }
- } else {
- Operand dst = g.ToOperand(destination);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(src);
- __ fstp_s(dst);
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(src);
- __ fstp_d(dst);
- break;
- default:
- UNREACHABLE();
- }
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
-void CodeGenerator::AssembleSwap(InstructionOperand* source,
- InstructionOperand* destination) {
- X87OperandConverter g(this, nullptr);
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister() && destination->IsRegister()) {
- // Register-register.
- Register src = g.ToRegister(source);
- Register dst = g.ToRegister(destination);
- __ xchg(dst, src);
- } else if (source->IsRegister() && destination->IsStackSlot()) {
- // Register-memory.
- __ xchg(g.ToRegister(source), g.ToOperand(destination));
- } else if (source->IsStackSlot() && destination->IsStackSlot()) {
- // Memory-memory.
- Operand dst1 = g.ToOperand(destination);
- __ push(dst1);
- frame_access_state()->IncreaseSPDelta(1);
- Operand src1 = g.ToOperand(source);
- __ push(src1);
- Operand dst2 = g.ToOperand(destination);
- __ pop(dst2);
- frame_access_state()->IncreaseSPDelta(-1);
- Operand src2 = g.ToOperand(source);
- __ pop(src2);
- } else if (source->IsFPRegister() && destination->IsFPRegister()) {
- UNREACHABLE();
- } else if (source->IsFPRegister() && destination->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(g.ToOperand(destination));
- __ fxch();
- __ fstp_s(g.ToOperand(destination));
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(g.ToOperand(destination));
- __ fxch();
- __ fstp_d(g.ToOperand(destination));
- break;
- default:
- UNREACHABLE();
- }
- } else if (source->IsFPStackSlot() && destination->IsFPStackSlot()) {
- auto allocated = AllocatedOperand::cast(*source);
- switch (allocated.representation()) {
- case MachineRepresentation::kFloat32:
- __ fld_s(g.ToOperand(source));
- __ fld_s(g.ToOperand(destination));
- __ fstp_s(g.ToOperand(source));
- __ fstp_s(g.ToOperand(destination));
- break;
- case MachineRepresentation::kFloat64:
- __ fld_d(g.ToOperand(source));
- __ fld_d(g.ToOperand(destination));
- __ fstp_d(g.ToOperand(source));
- __ fstp_d(g.ToOperand(destination));
- break;
- default:
- UNREACHABLE();
- }
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
-}
-
-
-void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
- for (size_t index = 0; index < target_count; ++index) {
- __ dd(targets[index]);
- }
-}
-
-
-void CodeGenerator::EnsureSpaceForLazyDeopt() {
- if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
- return;
- }
-
- int space_needed = Deoptimizer::patch_size();
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
-}
-
-#undef __
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
deleted file mode 100644
index 5f527fd43f..0000000000
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
-#define V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
-
-#include "src/compiler/instruction.h"
-#include "src/compiler/instruction-codes.h"
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// X87-specific opcodes that specify which assembly sequence to emit.
-// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X87Add) \
- V(X87And) \
- V(X87Cmp) \
- V(X87Cmp16) \
- V(X87Cmp8) \
- V(X87Test) \
- V(X87Test16) \
- V(X87Test8) \
- V(X87Or) \
- V(X87Xor) \
- V(X87Sub) \
- V(X87Imul) \
- V(X87ImulHigh) \
- V(X87UmulHigh) \
- V(X87Idiv) \
- V(X87Udiv) \
- V(X87Not) \
- V(X87Neg) \
- V(X87Shl) \
- V(X87Shr) \
- V(X87Sar) \
- V(X87AddPair) \
- V(X87SubPair) \
- V(X87MulPair) \
- V(X87ShlPair) \
- V(X87ShrPair) \
- V(X87SarPair) \
- V(X87Ror) \
- V(X87Lzcnt) \
- V(X87Popcnt) \
- V(X87Float32Cmp) \
- V(X87Float32Add) \
- V(X87Float32Sub) \
- V(X87Float32Mul) \
- V(X87Float32Div) \
- V(X87Float32Abs) \
- V(X87Float32Neg) \
- V(X87Float32Sqrt) \
- V(X87Float32Round) \
- V(X87LoadFloat64Constant) \
- V(X87Float64Add) \
- V(X87Float64Sub) \
- V(X87Float64Mul) \
- V(X87Float64Div) \
- V(X87Float64Mod) \
- V(X87Float32Max) \
- V(X87Float64Max) \
- V(X87Float32Min) \
- V(X87Float64Min) \
- V(X87Float64Abs) \
- V(X87Float64Neg) \
- V(X87Int32ToFloat32) \
- V(X87Uint32ToFloat32) \
- V(X87Int32ToFloat64) \
- V(X87Float32ToFloat64) \
- V(X87Uint32ToFloat64) \
- V(X87Float64ToInt32) \
- V(X87Float32ToInt32) \
- V(X87Float32ToUint32) \
- V(X87Float64ToFloat32) \
- V(X87Float64ToUint32) \
- V(X87Float64ExtractHighWord32) \
- V(X87Float64ExtractLowWord32) \
- V(X87Float64InsertHighWord32) \
- V(X87Float64InsertLowWord32) \
- V(X87Float64Sqrt) \
- V(X87Float64Round) \
- V(X87Float64Cmp) \
- V(X87Float64SilenceNaN) \
- V(X87Movsxbl) \
- V(X87Movzxbl) \
- V(X87Movb) \
- V(X87Movsxwl) \
- V(X87Movzxwl) \
- V(X87Movw) \
- V(X87Movl) \
- V(X87Movss) \
- V(X87Movsd) \
- V(X87Lea) \
- V(X87BitcastFI) \
- V(X87BitcastIF) \
- V(X87Push) \
- V(X87PushFloat64) \
- V(X87PushFloat32) \
- V(X87Poke) \
- V(X87StackCheck) \
- V(X87Xchgb) \
- V(X87Xchgw) \
- V(X87Xchgl)
-
-// Addressing modes represent the "shape" of inputs to an instruction.
-// Many instructions support multiple addressing modes. Addressing modes
-// are encoded into the InstructionCode of the instruction and tell the
-// code generator after register allocation which assembler method to call.
-//
-// We use the following local notation for addressing modes:
-//
-// M = memory operand
-// R = base register
-// N = index register * N for N in {1, 2, 4, 8}
-// I = immediate displacement (int32_t)
-
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MR) /* [%r1 ] */ \
- V(MRI) /* [%r1 + K] */ \
- V(MR1) /* [%r1 + %r2*1 ] */ \
- V(MR2) /* [%r1 + %r2*2 ] */ \
- V(MR4) /* [%r1 + %r2*4 ] */ \
- V(MR8) /* [%r1 + %r2*8 ] */ \
- V(MR1I) /* [%r1 + %r2*1 + K] */ \
- V(MR2I) /* [%r1 + %r2*2 + K] */ \
- V(MR4I) /* [%r1 + %r2*4 + K] */ \
- V(MR8I) /* [%r1 + %r2*8 + K] */ \
- V(M1) /* [ %r2*1 ] */ \
- V(M2) /* [ %r2*2 ] */ \
- V(M4) /* [ %r2*4 ] */ \
- V(M8) /* [ %r2*8 ] */ \
- V(M1I) /* [ %r2*1 + K] */ \
- V(M2I) /* [ %r2*2 + K] */ \
- V(M4I) /* [ %r2*4 + K] */ \
- V(M8I) /* [ %r2*8 + K] */ \
- V(MI) /* [ K] */
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_X87_INSTRUCTION_CODES_X87_H_
diff --git a/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc b/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
deleted file mode 100644
index af86a87ad7..0000000000
--- a/deps/v8/src/compiler/x87/instruction-scheduler-x87.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/instruction-scheduler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-bool InstructionScheduler::SchedulerSupported() { return false; }
-
-
-int InstructionScheduler::GetTargetInstructionFlags(
- const Instruction* instr) const {
- UNIMPLEMENTED();
-}
-
-
-int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
- UNIMPLEMENTED();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
deleted file mode 100644
index b5594b8894..0000000000
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ /dev/null
@@ -1,1881 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/base/adapters.h"
-#include "src/compiler/instruction-selector-impl.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Adds X87-specific methods for generating operands.
-class X87OperandGenerator final : public OperandGenerator {
- public:
- explicit X87OperandGenerator(InstructionSelector* selector)
- : OperandGenerator(selector) {}
-
- InstructionOperand UseByteRegister(Node* node) {
- // TODO(titzer): encode byte register use constraints.
- return UseFixed(node, edx);
- }
-
- InstructionOperand DefineAsByteRegister(Node* node) {
- // TODO(titzer): encode byte register def constraints.
- return DefineAsRegister(node);
- }
-
- bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
- int effect_level) {
- if (input->opcode() != IrOpcode::kLoad ||
- !selector()->CanCover(node, input)) {
- return false;
- }
- if (effect_level != selector()->GetEffectLevel(input)) {
- return false;
- }
- MachineRepresentation rep =
- LoadRepresentationOf(input->op()).representation();
- switch (opcode) {
- case kX87Cmp:
- case kX87Test:
- return rep == MachineRepresentation::kWord32 ||
- rep == MachineRepresentation::kTagged;
- case kX87Cmp16:
- case kX87Test16:
- return rep == MachineRepresentation::kWord16;
- case kX87Cmp8:
- case kX87Test8:
- return rep == MachineRepresentation::kWord8;
- default:
- break;
- }
- return false;
- }
-
- InstructionOperand CreateImmediate(int imm) {
- return sequence()->AddImmediate(Constant(imm));
- }
-
- bool CanBeImmediate(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
- case IrOpcode::kExternalConstant:
- case IrOpcode::kRelocatableInt32Constant:
- case IrOpcode::kRelocatableInt64Constant:
- return true;
- case IrOpcode::kHeapConstant: {
-// TODO(bmeurer): We must not dereference handles concurrently. If we
-// really have to do this here, then we need to find a way to put this
-// information on the HeapConstant node already.
-#if 0
- // Constants in new space cannot be used as immediates in V8 because
- // the GC does not scan code objects when collecting the new generation.
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- Isolate* isolate = value->GetIsolate();
- return !isolate->heap()->InNewSpace(*value);
-#endif
- }
- default:
- return false;
- }
- }
-
- AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
- Node* displacement_node,
- DisplacementMode displacement_mode,
- InstructionOperand inputs[],
- size_t* input_count) {
- AddressingMode mode = kMode_MRI;
- int32_t displacement = (displacement_node == nullptr)
- ? 0
- : OpParameter<int32_t>(displacement_node);
- if (displacement_mode == kNegativeDisplacement) {
- displacement = -displacement;
- }
- if (base != nullptr) {
- if (base->opcode() == IrOpcode::kInt32Constant) {
- displacement += OpParameter<int32_t>(base);
- base = nullptr;
- }
- }
- if (base != nullptr) {
- inputs[(*input_count)++] = UseRegister(base);
- if (index != nullptr) {
- DCHECK(scale >= 0 && scale <= 3);
- inputs[(*input_count)++] = UseRegister(index);
- if (displacement != 0) {
- inputs[(*input_count)++] = TempImmediate(displacement);
- static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
- kMode_MR4I, kMode_MR8I};
- mode = kMRnI_modes[scale];
- } else {
- static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
- kMode_MR4, kMode_MR8};
- mode = kMRn_modes[scale];
- }
- } else {
- if (displacement == 0) {
- mode = kMode_MR;
- } else {
- inputs[(*input_count)++] = TempImmediate(displacement);
- mode = kMode_MRI;
- }
- }
- } else {
- DCHECK(scale >= 0 && scale <= 3);
- if (index != nullptr) {
- inputs[(*input_count)++] = UseRegister(index);
- if (displacement != 0) {
- inputs[(*input_count)++] = TempImmediate(displacement);
- static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
- kMode_M4I, kMode_M8I};
- mode = kMnI_modes[scale];
- } else {
- static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
- kMode_M4, kMode_M8};
- mode = kMn_modes[scale];
- }
- } else {
- inputs[(*input_count)++] = TempImmediate(displacement);
- return kMode_MI;
- }
- }
- return mode;
- }
-
- AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
- InstructionOperand inputs[],
- size_t* input_count) {
- BaseWithIndexAndDisplacement32Matcher m(node, AddressOption::kAllowAll);
- DCHECK(m.matches());
- if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
- return GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(),
- m.displacement_mode(), inputs, input_count);
- } else {
- inputs[(*input_count)++] = UseRegister(node->InputAt(0));
- inputs[(*input_count)++] = UseRegister(node->InputAt(1));
- return kMode_MR1;
- }
- }
-
- bool CanBeBetterLeftOperand(Node* node) const {
- return !selector()->IsLive(node);
- }
-};
-
-void InstructionSelector::VisitStackSlot(Node* node) {
- StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
- int slot = frame_->AllocateSpillSlot(rep.size());
- OperandGenerator g(this);
-
- Emit(kArchStackSlot, g.DefineAsRegister(node),
- sequence()->AddImmediate(Constant(slot)), 0, nullptr);
-}
-
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kFloat32:
- opcode = kX87Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX87Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kX87Movsxbl : kX87Movzxbl;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kX87Movsxwl : kX87Movzxwl;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kX87Movl;
- break;
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
-
- X87OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitStore(Node* node) {
- X87OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
- WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
-
- if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(rep));
- AddressingMode addressing_mode;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
- RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
- switch (write_barrier_kind) {
- case kNoWriteBarrier:
- UNREACHABLE();
- break;
- case kMapWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsMap;
- break;
- case kPointerWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsPointer;
- break;
- case kFullWriteBarrier:
- record_write_mode = RecordWriteMode::kValueIsAny;
- break;
- }
- InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
- code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
- } else {
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kX87Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX87Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kX87Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX87Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kX87Movl;
- break;
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
-
- InstructionOperand val;
- if (g.CanBeImmediate(value)) {
- val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
- val = g.UseByteRegister(value);
- } else {
- val = g.UseRegister(value);
- }
-
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
- inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
- }
-}
-
-void InstructionSelector::VisitProtectedStore(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
-}
-
-// The architecture supports unaligned access, so VisitLoad is used instead.
-void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
-
-// The architecture supports unaligned access, so VisitStore is used instead.
-void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X87OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- offset_operand, g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
- }
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X87OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kSimd1x4: // Fall through.
- case MachineRepresentation::kSimd1x8: // Fall through.
- case MachineRepresentation::kSimd1x16: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value)
- : ((rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit)
- ? g.UseByteRegister(value)
- : g.UseRegister(value));
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, value_operand, offset_operand,
- g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
- offset_operand, length_operand, value_operand, g.UseRegister(buffer),
- offset_operand);
- }
-}
-
-namespace {
-
-// Shared routine for multiple binary operations.
-void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- Node* left = m.left().node();
- Node* right = m.right().node();
- InstructionOperand inputs[4];
- size_t input_count = 0;
- InstructionOperand outputs[2];
- size_t output_count = 0;
-
- // TODO(turbofan): match complex addressing modes.
- if (left == right) {
- // If both inputs refer to the same operand, enforce allocating a register
- // for both of them to ensure that we don't end up generating code like
- // this:
- //
- // mov eax, [ebp-0x10]
- // add eax, [ebp-0x10]
- // jo label
- InstructionOperand const input = g.UseRegister(left);
- inputs[input_count++] = input;
- inputs[input_count++] = input;
- } else if (g.CanBeImmediate(right)) {
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.UseImmediate(right);
- } else {
- if (node->op()->HasProperty(Operator::kCommutative) &&
- g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- inputs[input_count++] = g.UseRegister(left);
- inputs[input_count++] = g.Use(right);
- }
-
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- }
-
- outputs[output_count++] = g.DefineSameAsFirst(node);
- if (cont->IsSet()) {
- outputs[output_count++] = g.DefineAsRegister(cont->result());
- }
-
- DCHECK_NE(0u, input_count);
- DCHECK_NE(0u, output_count);
- DCHECK_GE(arraysize(inputs), input_count);
- DCHECK_GE(arraysize(outputs), output_count);
-
- opcode = cont->Encode(opcode);
- if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
- } else {
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
- }
-}
-
-
-// Shared routine for multiple binary operations.
-void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode) {
- FlagsContinuation cont;
- VisitBinop(selector, node, opcode, &cont);
-}
-
-} // namespace
-
-void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kX87And);
-}
-
-
-void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kX87Or);
-}
-
-
-void InstructionSelector::VisitWord32Xor(Node* node) {
- X87OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.right().Is(-1)) {
- Emit(kX87Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
- } else {
- VisitBinop(this, node, kX87Xor);
- }
-}
-
-
-// Shared routine for multiple shift operations.
-static inline void VisitShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseImmediate(right));
- } else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.UseFixed(right, ecx));
- }
-}
-
-
-namespace {
-
-void VisitMulHigh(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- selector->Emit(
- opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
-void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(edx)};
- selector->Emit(opcode, g.DefineAsFixed(node, eax),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
-void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
- X87OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- selector->Emit(opcode, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
-}
-
-void EmitLea(InstructionSelector* selector, Node* result, Node* index,
- int scale, Node* base, Node* displacement,
- DisplacementMode displacement_mode) {
- X87OperandGenerator g(selector);
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode =
- g.GenerateMemoryOperandInputs(index, scale, base, displacement,
- displacement_mode, inputs, &input_count);
-
- DCHECK_NE(0u, input_count);
- DCHECK_GE(arraysize(inputs), input_count);
-
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(result);
-
- InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
-
- selector->Emit(opcode, 1, outputs, input_count, inputs);
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitWord32Shl(Node* node) {
- Int32ScaleMatcher m(node, true);
- if (m.matches()) {
- Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
- return;
- }
- VisitShift(this, node, kX87Shl);
-}
-
-
-void InstructionSelector::VisitWord32Shr(Node* node) {
- VisitShift(this, node, kX87Shr);
-}
-
-
-void InstructionSelector::VisitWord32Sar(Node* node) {
- VisitShift(this, node, kX87Sar);
-}
-
-void InstructionSelector::VisitInt32PairAdd(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
- g.DefineAsRegister(projection1)};
-
- InstructionOperand temps[] = {g.TempRegister()};
-
- Emit(kX87AddPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Add, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void InstructionSelector::VisitInt32PairSub(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // We use UseUniqueRegister here to avoid register sharing with the temp
- // register.
- InstructionOperand inputs[] = {
- g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)), g.UseUniqueRegister(node->InputAt(3))};
-
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node),
- g.DefineAsRegister(projection1)};
-
- InstructionOperand temps[] = {g.TempRegister()};
-
- Emit(kX87SubPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Sub, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void InstructionSelector::VisitInt32PairMul(Node* node) {
- X87OperandGenerator g(this);
-
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- // InputAt(3) explicitly shares ecx with OutputRegister(1) to save one
- // register and one mov instruction.
- InstructionOperand inputs[] = {g.UseUnique(node->InputAt(0)),
- g.UseUnique(node->InputAt(1)),
- g.UseUniqueRegister(node->InputAt(2)),
- g.UseFixed(node->InputAt(3), ecx)};
-
- InstructionOperand outputs[] = {
- g.DefineAsFixed(node, eax),
- g.DefineAsFixed(NodeProperties::FindProjection(node, 1), ecx)};
-
- InstructionOperand temps[] = {g.TempRegister(edx)};
-
- Emit(kX87MulPair, 2, outputs, 4, inputs, 1, temps);
- } else {
- // The high word of the result is not used, so we emit the standard 32 bit
- // instruction.
- Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
- g.Use(node->InputAt(2)));
- }
-}
-
-void VisitWord32PairShift(InstructionSelector* selector, InstructionCode opcode,
- Node* node) {
- X87OperandGenerator g(selector);
-
- Node* shift = node->InputAt(2);
- InstructionOperand shift_operand;
- if (g.CanBeImmediate(shift)) {
- shift_operand = g.UseImmediate(shift);
- } else {
- shift_operand = g.UseFixed(shift, ecx);
- }
- InstructionOperand inputs[] = {g.UseFixed(node->InputAt(0), eax),
- g.UseFixed(node->InputAt(1), edx),
- shift_operand};
-
- InstructionOperand outputs[2];
- InstructionOperand temps[1];
- int32_t output_count = 0;
- int32_t temp_count = 0;
- outputs[output_count++] = g.DefineAsFixed(node, eax);
- Node* projection1 = NodeProperties::FindProjection(node, 1);
- if (projection1) {
- outputs[output_count++] = g.DefineAsFixed(projection1, edx);
- } else {
- temps[temp_count++] = g.TempRegister(edx);
- }
-
- selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps);
-}
-
-void InstructionSelector::VisitWord32PairShl(Node* node) {
- VisitWord32PairShift(this, kX87ShlPair, node);
-}
-
-void InstructionSelector::VisitWord32PairShr(Node* node) {
- VisitWord32PairShift(this, kX87ShrPair, node);
-}
-
-void InstructionSelector::VisitWord32PairSar(Node* node) {
- VisitWord32PairShift(this, kX87SarPair, node);
-}
-
-void InstructionSelector::VisitWord32Ror(Node* node) {
- VisitShift(this, node, kX87Ror);
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
-
-
-void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitInt32Add(Node* node) {
- X87OperandGenerator g(this);
-
- // Try to match the Add to a lea pattern
- BaseWithIndexAndDisplacement32Matcher m(node);
- if (m.matches() &&
- (m.displacement() == nullptr || g.CanBeImmediate(m.displacement()))) {
- InstructionOperand inputs[4];
- size_t input_count = 0;
- AddressingMode mode = g.GenerateMemoryOperandInputs(
- m.index(), m.scale(), m.base(), m.displacement(), m.displacement_mode(),
- inputs, &input_count);
-
- DCHECK_NE(0u, input_count);
- DCHECK_GE(arraysize(inputs), input_count);
-
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
-
- InstructionCode opcode = AddressingModeField::encode(mode) | kX87Lea;
- Emit(opcode, 1, outputs, input_count, inputs);
- return;
- }
-
- // No lea pattern match, use add
- VisitBinop(this, node, kX87Add);
-}
-
-
-void InstructionSelector::VisitInt32Sub(Node* node) {
- X87OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().Is(0)) {
- Emit(kX87Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
- } else {
- VisitBinop(this, node, kX87Sub);
- }
-}
-
-
-void InstructionSelector::VisitInt32Mul(Node* node) {
- Int32ScaleMatcher m(node, true);
- if (m.matches()) {
- Node* index = node->InputAt(0);
- Node* base = m.power_of_two_plus_one() ? index : nullptr;
- EmitLea(this, node, index, m.scale(), base, nullptr, kPositiveDisplacement);
- return;
- }
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- if (g.CanBeImmediate(right)) {
- Emit(kX87Imul, g.DefineAsRegister(node), g.Use(left),
- g.UseImmediate(right));
- } else {
- if (g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- Emit(kX87Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
- }
-}
-
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
- VisitMulHigh(this, node, kX87ImulHigh);
-}
-
-
-void InstructionSelector::VisitUint32MulHigh(Node* node) {
- VisitMulHigh(this, node, kX87UmulHigh);
-}
-
-
-void InstructionSelector::VisitInt32Div(Node* node) {
- VisitDiv(this, node, kX87Idiv);
-}
-
-
-void InstructionSelector::VisitUint32Div(Node* node) {
- VisitDiv(this, node, kX87Udiv);
-}
-
-
-void InstructionSelector::VisitInt32Mod(Node* node) {
- VisitMod(this, node, kX87Idiv);
-}
-
-
-void InstructionSelector::VisitUint32Mod(Node* node) {
- VisitMod(this, node, kX87Udiv);
-}
-
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Int32ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Uint32ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Uint32ToFloat64, g.DefineAsFixed(node, stX_0),
- g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToFloat32, g.DefineAsFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87BitcastIF, g.DefineAsFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Add, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Sub, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mul, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Div, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Mod(Node* node) {
- X87OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempRegister(eax)};
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Mod, g.DefineAsFixed(node, stX_0), 1, temps)->MarkAsCall();
-}
-
-void InstructionSelector::VisitFloat32Max(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Max, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float32Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(kX87Float64Min, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Abs, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Sqrt, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundDown),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundDown),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundUp), g.UseFixed(node, stX_0),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundToZero),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundToZero),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- UNREACHABLE();
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float32Round | MiscField::encode(kRoundToNearest),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64Round | MiscField::encode(kRoundToNearest),
- g.UseFixed(node, stX_0), g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float32Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64Neg, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
- InstructionCode opcode) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
-}
-
-void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
- InstructionCode opcode) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(opcode, g.DefineAsFixed(node, stX_0), 0, nullptr)->MarkAsCall();
-}
-
-void InstructionSelector::EmitPrepareArguments(
- ZoneVector<PushParameter>* arguments, const CallDescriptor* descriptor,
- Node* node) {
- X87OperandGenerator g(this);
-
- // Prepare for C function call.
- if (descriptor->IsCFunctionCall()) {
- InstructionOperand temps[] = {g.TempRegister()};
- size_t const temp_count = arraysize(temps);
- Emit(kArchPrepareCallCFunction |
- MiscField::encode(static_cast<int>(descriptor->ParameterCount())),
- 0, nullptr, 0, nullptr, temp_count, temps);
-
- // Poke any stack arguments.
- for (size_t n = 0; n < arguments->size(); ++n) {
- PushParameter input = (*arguments)[n];
- if (input.node()) {
- int const slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
- Emit(kX87Poke | MiscField::encode(slot), g.NoOutput(), value);
- }
- }
- } else {
- // Push any stack arguments.
- for (PushParameter input : base::Reversed(*arguments)) {
- // TODO(titzer): handle pushing double parameters.
- if (input.node() == nullptr) continue;
- InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- Emit(kX87Push, g.NoOutput(), value);
- }
- }
-}
-
-
-bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
-
-int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 0; }
-
-namespace {
-
-void VisitCompareWithMemoryOperand(InstructionSelector* selector,
- InstructionCode opcode, Node* left,
- InstructionOperand right,
- FlagsContinuation* cont) {
- DCHECK(left->opcode() == IrOpcode::kLoad);
- X87OperandGenerator g(selector);
- size_t input_count = 0;
- InstructionOperand inputs[6];
- AddressingMode addressing_mode =
- g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
- opcode |= AddressingModeField::encode(addressing_mode);
- opcode = cont->Encode(opcode);
- inputs[input_count++] = right;
-
- if (cont->IsBranch()) {
- inputs[input_count++] = g.Label(cont->true_block());
- inputs[input_count++] = g.Label(cont->false_block());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- InstructionOperand output = g.DefineAsRegister(cont->result());
- selector->Emit(opcode, 1, &output, input_count, inputs);
- } else {
- DCHECK(cont->IsTrap());
- inputs[input_count++] = g.UseImmediate(cont->trap_id());
- selector->Emit(opcode, 0, nullptr, input_count, inputs);
- }
-}
-
-// Shared routine for multiple compare operations.
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand left, InstructionOperand right,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(opcode, g.NoOutput(), left, right,
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-
-// Shared routine for multiple compare operations.
-void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- Node* left, Node* right, FlagsContinuation* cont,
- bool commutative) {
- X87OperandGenerator g(selector);
- if (commutative && g.CanBeBetterLeftOperand(right)) {
- std::swap(left, right);
- }
- VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
-}
-
-MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
- if (hint_node->opcode() == IrOpcode::kLoad) {
- MachineType hint = LoadRepresentationOf(hint_node->op());
- if (node->opcode() == IrOpcode::kInt32Constant ||
- node->opcode() == IrOpcode::kInt64Constant) {
- int64_t constant = node->opcode() == IrOpcode::kInt32Constant
- ? OpParameter<int32_t>(node)
- : OpParameter<int64_t>(node);
- if (hint == MachineType::Int8()) {
- if (constant >= std::numeric_limits<int8_t>::min() &&
- constant <= std::numeric_limits<int8_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Uint8()) {
- if (constant >= std::numeric_limits<uint8_t>::min() &&
- constant <= std::numeric_limits<uint8_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Int16()) {
- if (constant >= std::numeric_limits<int16_t>::min() &&
- constant <= std::numeric_limits<int16_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Uint16()) {
- if (constant >= std::numeric_limits<uint16_t>::min() &&
- constant <= std::numeric_limits<uint16_t>::max()) {
- return hint;
- }
- } else if (hint == MachineType::Int32()) {
- return hint;
- } else if (hint == MachineType::Uint32()) {
- if (constant >= 0) return hint;
- }
- }
- }
- return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
- : MachineType::None();
-}
-
-// Tries to match the size of the given opcode to that of the operands, if
-// possible.
-InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
- Node* right, FlagsContinuation* cont) {
- // TODO(epertoso): we can probably get some size information out of phi nodes.
-  // If the load representations don't match, both operands will be
-  // zero/sign-extended to 32 bits.
- MachineType left_type = MachineTypeForNarrow(left, right);
- MachineType right_type = MachineTypeForNarrow(right, left);
- if (left_type == right_type) {
- switch (left_type.representation()) {
- case MachineRepresentation::kBit:
- case MachineRepresentation::kWord8: {
- if (opcode == kX87Test) return kX87Test8;
- if (opcode == kX87Cmp) {
- if (left_type.semantic() == MachineSemantic::kUint32) {
- cont->OverwriteUnsignedIfSigned();
- } else {
- CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
- }
- return kX87Cmp8;
- }
- break;
- }
- case MachineRepresentation::kWord16:
- if (opcode == kX87Test) return kX87Test16;
- if (opcode == kX87Cmp) {
- if (left_type.semantic() == MachineSemantic::kUint32) {
- cont->OverwriteUnsignedIfSigned();
- } else {
- CHECK_EQ(MachineSemantic::kInt32, left_type.semantic());
- }
- return kX87Cmp16;
- }
- break;
- default:
- break;
- }
- }
- return opcode;
-}
-
-// Shared routine for multiple float32 compare operations (inputs commuted).
-void VisitFloat32Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
- selector->Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(1)));
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(cont->Encode(kX87Float32Cmp),
- g.DefineAsByteRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-
-// Shared routine for multiple float64 compare operations (inputs commuted).
-void VisitFloat64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- selector->Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(1)));
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.Label(cont->true_block()), g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
- cont->kind(), cont->reason(), cont->frame_state());
- } else if (cont->IsSet()) {
- selector->Emit(cont->Encode(kX87Float64Cmp),
- g.DefineAsByteRegister(cont->result()));
- } else {
- DCHECK(cont->IsTrap());
- selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
- g.UseImmediate(cont->trap_id()));
- }
-}
-
-// Shared routine for multiple word compare operations.
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
-
- InstructionCode narrowed_opcode =
- TryNarrowOpcodeSize(opcode, left, right, cont);
-
- int effect_level = selector->GetEffectLevel(node);
- if (cont->IsBranch()) {
- effect_level = selector->GetEffectLevel(
- cont->true_block()->PredecessorAt(0)->control_input());
- }
-
-  // If one of the two inputs is an immediate, make sure it's on the right;
-  // if one of the two inputs is a memory operand, make sure it's on the left.
- if ((!g.CanBeImmediate(right) && g.CanBeImmediate(left)) ||
- (g.CanBeMemoryOperand(narrowed_opcode, node, right, effect_level) &&
- !g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level))) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
- // Match immediates on right side of comparison.
- if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
- return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
- g.UseImmediate(right), cont);
- }
- return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
- cont);
- }
-
- // Match memory operands on left side of comparison.
- if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
- bool needs_byte_register =
- narrowed_opcode == kX87Test8 || narrowed_opcode == kX87Cmp8;
- return VisitCompareWithMemoryOperand(
- selector, narrowed_opcode, left,
- needs_byte_register ? g.UseByteRegister(right) : g.UseRegister(right),
- cont);
- }
-
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
- return VisitCompare(selector, opcode, left, right, cont,
- node->op()->HasProperty(Operator::kCommutative));
-}
-
-void VisitWordCompare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
- X87OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
- LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
- ExternalReference js_stack_limit =
- ExternalReference::address_of_stack_limit(selector->isolate());
- if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
- // Compare(Load(js_stack_limit), LoadStackPointer)
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- InstructionCode opcode = cont->Encode(kX87StackCheck);
- if (cont->IsBranch()) {
- selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
- g.Label(cont->false_block()));
- } else if (cont->IsDeoptimize()) {
- selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
- selector->Emit(opcode, g.DefineAsRegister(cont->result()));
- }
- return;
- }
- }
- VisitWordCompare(selector, node, kX87Cmp, cont);
-}
-
-
-// Shared routine for word comparison with zero.
-void VisitWordCompareZero(InstructionSelector* selector, Node* user,
- Node* value, FlagsContinuation* cont) {
- // Try to combine with comparisons against 0 by simply inverting the branch.
- while (value->opcode() == IrOpcode::kWord32Equal &&
- selector->CanCover(user, value)) {
- Int32BinopMatcher m(value);
- if (!m.right().Is(0)) break;
-
- user = value;
- value = m.left().node();
- cont->Negate();
- }
-
- if (selector->CanCover(user, value)) {
- switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
- cont->OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kInt32LessThan:
- cont->OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kInt32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kUint32LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kUint32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kFloat32Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat32LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat32Compare(selector, value, cont);
- case IrOpcode::kFloat64Equal:
- cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThan);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
- return VisitFloat64Compare(selector, value, cont);
- case IrOpcode::kProjection:
- // Check if this is the overflow output projection of an
- // <Operation>WithOverflow node.
- if (ProjectionIndexOf(value->op()) == 1u) {
-          // We cannot combine the <Operation>WithOverflow with this branch
-          // unless the 0th projection (the use of the actual value of the
-          // <Operation>) is either nullptr, which means there's no use of the
-          // actual value, or was already defined, which means it is scheduled
-          // *AFTER* this branch.
- Node* const node = value->InputAt(0);
- Node* const result = NodeProperties::FindProjection(node, 0);
- if (result == nullptr || selector->IsDefined(result)) {
- switch (node->opcode()) {
- case IrOpcode::kInt32AddWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Add, cont);
- case IrOpcode::kInt32SubWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Sub, cont);
- case IrOpcode::kInt32MulWithOverflow:
- cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop(selector, node, kX87Imul, cont);
- default:
- break;
- }
- }
- }
- break;
- case IrOpcode::kInt32Sub:
- return VisitWordCompare(selector, value, cont);
- case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kX87Test, cont);
- default:
- break;
- }
- }
-
-  // Continuation could not be combined with a compare; emit compare against 0.
- X87OperandGenerator g(selector);
- VisitCompare(selector, kX87Cmp, g.Use(value), g.TempImmediate(0), cont);
-}
-
-} // namespace
-
-
-void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
- BasicBlock* fbranch) {
- FlagsContinuation cont(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeIf(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
- DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitTrapUnless(Node* node,
- Runtime::FunctionId func_id) {
- FlagsContinuation cont =
- FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
- VisitWordCompareZero(this, node, node->InputAt(0), &cont);
-}
-
-void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
- X87OperandGenerator g(this);
- InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
-
- // Emit either ArchTableSwitch or ArchLookupSwitch.
- size_t table_space_cost = 4 + sw.value_range;
- size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * sw.case_count;
- size_t lookup_time_cost = sw.case_count;
- if (sw.case_count > 4 &&
- table_space_cost + 3 * table_time_cost <=
- lookup_space_cost + 3 * lookup_time_cost &&
- sw.min_value > std::numeric_limits<int32_t>::min()) {
- InstructionOperand index_operand = value_operand;
- if (sw.min_value) {
- index_operand = g.TempRegister();
- Emit(kX87Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-sw.min_value));
- }
- // Generate a table lookup.
- return EmitTableSwitch(sw, index_operand);
- }
-
- // Generate a sequence of conditional jumps.
- return EmitLookupSwitch(sw, value_operand);
-}
-
-
-void InstructionSelector::VisitWord32Equal(Node* const node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) {
- return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
- }
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThan(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThan(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
- VisitWordCompare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Add, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Add, &cont);
-}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Sub, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Sub, &cont);
-}
-
-void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
- if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop(this, node, kX87Imul, &cont);
- }
- FlagsContinuation cont;
- VisitBinop(this, node, kX87Imul, &cont);
-}
-
-void InstructionSelector::VisitFloat32Equal(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
- VisitFloat32Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64Equal(Node* node) {
- FlagsContinuation cont = FlagsContinuation::ForSet(kUnorderedEqual, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThan, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont =
- FlagsContinuation::ForSet(kUnsignedGreaterThanOrEqual, node);
- VisitFloat64Compare(this, node, &cont);
-}
-
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ExtractLowWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87Float64ExtractHighWord32, g.DefineAsRegister(node),
- g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- Emit(kX87Float64InsertLowWord32, g.UseFixed(node, stX_0), g.UseRegister(left),
- g.UseRegister(right));
-}
-
-
-void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
- X87OperandGenerator g(this);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- Emit(kX87Float64InsertHighWord32, g.UseFixed(node, stX_0),
- g.UseRegister(left), g.UseRegister(right));
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
- X87OperandGenerator g(this);
- Emit(kX87PushFloat64, g.NoOutput(), g.Use(node->InputAt(0)));
- Emit(kX87Float64SilenceNaN, g.DefineAsFixed(node, stX_0), 0, nullptr);
-}
-
-void InstructionSelector::VisitAtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
-}
-
-void InstructionSelector::VisitAtomicStore(Node* node) {
- X87OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX87Xchgb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX87Xchgw;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX87Xchgl;
- break;
- default:
- UNREACHABLE();
- break;
- }
- AddressingMode addressing_mode;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- if (g.CanBeImmediate(index)) {
- inputs[input_count++] = g.UseImmediate(index);
- addressing_mode = kMode_MRI;
- } else {
- inputs[input_count++] = g.UseUniqueRegister(index);
- addressing_mode = kMode_MR1;
- }
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
-}
-
-void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
- UNREACHABLE();
-}
-
-void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
- UNREACHABLE();
-}
-
-// static
-MachineOperatorBuilder::Flags
-InstructionSelector::SupportedMachineOperatorFlags() {
- MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kWord32ShiftIsSafe;
- if (CpuFeatures::IsSupported(POPCNT)) {
- flags |= MachineOperatorBuilder::kWord32Popcnt;
- }
-
- flags |= MachineOperatorBuilder::kFloat32RoundDown |
- MachineOperatorBuilder::kFloat64RoundDown |
- MachineOperatorBuilder::kFloat32RoundUp |
- MachineOperatorBuilder::kFloat64RoundUp |
- MachineOperatorBuilder::kFloat32RoundTruncate |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kFloat32RoundTiesEven |
- MachineOperatorBuilder::kFloat64RoundTiesEven;
- return flags;
-}
-
-// static
-MachineOperatorBuilder::AlignmentRequirements
-InstructionSelector::AlignmentRequirements() {
- return MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8