Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 2
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 69
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 24
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 166
-rw-r--r--  deps/v8/src/compiler/access-info.h | 38
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 538
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 33
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 33
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 388
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 75
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 15
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 51
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.h | 9
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 13
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.h | 6
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 199
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 15
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 39
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 22
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 102
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 37
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 28
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 6
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 10
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 1
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 86
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 2
-rw-r--r--  deps/v8/src/compiler/frame.h | 13
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 8
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 2
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 20
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 206
-rw-r--r--  deps/v8/src/compiler/instruction.h | 2
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 33
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 355
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 13
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 53
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.h | 20
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 185
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.h | 7
-rw-r--r--  deps/v8/src/compiler/js-frame-specialization.cc | 9
-rw-r--r--  deps/v8/src/compiler/js-frame-specialization.h | 1
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 49
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 8
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 5
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 84
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 9
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 119
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 50
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 305
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 28
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 53
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 106
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 296
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 5
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 8
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 57
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 26
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 113
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 45
-rw-r--r--  deps/v8/src/compiler/mips/OWNERS | 7
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 345
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 54
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 218
-rw-r--r--  deps/v8/src/compiler/mips64/OWNERS | 7
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 464
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 54
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 219
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 50
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 10
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 23
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 5
-rw-r--r--  deps/v8/src/compiler/operator.cc | 6
-rw-r--r--  deps/v8/src/compiler/osr.cc | 23
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 122
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 13
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 8
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 45
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 20
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 29
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 4
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 26
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 19
-rw-r--r--  deps/v8/src/compiler/schedule.h | 6
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 834
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 46
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 100
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 1
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 3
-rw-r--r--  deps/v8/src/compiler/typer.cc | 36
-rw-r--r--  deps/v8/src/compiler/types.cc | 13
-rw-r--r--  deps/v8/src/compiler/types.h | 7
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 47
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 279
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 39
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 224
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 43
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 43
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 97
-rw-r--r--  deps/v8/src/compiler/x87/instruction-selector-x87.cc | 8
-rw-r--r--  deps/v8/src/compiler/zone-stats.cc | 6
-rw-r--r--  deps/v8/src/compiler/zone-stats.h | 6
105 files changed, 5927 insertions, 1988 deletions
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 015bf85758..3a26acc668 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -9,6 +9,8 @@ danno@chromium.org
tebbi@chromium.org
per-file wasm-*=ahaas@chromium.org
+per-file wasm-*=bbudge@chromium.org
+per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 11925a84db..5fbbdd09da 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -180,6 +180,24 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
}
// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectFunction() {
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kFunctionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Function(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectReceiver() {
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kReceiverOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kContinuationOffset,
@@ -199,15 +217,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
return access;
}
-// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
- FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
- return access;
-}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
@@ -230,6 +239,36 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
}
// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kQueueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
TypeCache const& type_cache = TypeCache::Get();
FieldAccess access = {kTaggedBase,
@@ -412,9 +451,9 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
}
// static
-FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridge() {
FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -737,9 +776,9 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess AccessBuilder::ForFixedArraySlot(
size_t index, WriteBarrierKind write_barrier_kind) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase, offset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
write_barrier_kind};
return access;
}
@@ -816,7 +855,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
access.machine_type = MachineType::Float64();
break;
case FAST_HOLEY_DOUBLE_ELEMENTS:
- access.type = Type::Number();
+ access.type = Type::NumberOrHole();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
break;
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 668a720740..b4c3ed0615 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -82,16 +82,28 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
- // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
- // field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
-
// Provides access to JSGeneratorObject::register_file() field.
static FieldAccess ForJSGeneratorObjectRegisterFile();
+ // Provides access to JSGeneratorObject::function() field.
+ static FieldAccess ForJSGeneratorObjectFunction();
+
+ // Provides access to JSGeneratorObject::receiver() field.
+ static FieldAccess ForJSGeneratorObjectReceiver();
+
// Provides access to JSGeneratorObject::resume_mode() field.
static FieldAccess ForJSGeneratorObjectResumeMode();
+ // Provides access to JSAsyncGeneratorObject::queue() field.
+ static FieldAccess ForJSAsyncGeneratorObjectQueue();
+
+ // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
+ // field.
+ static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
+
+ // Provides access to JSAsyncGeneratorObject::awaited_promise() field.
+ static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
+
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
@@ -140,8 +152,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedTypedArrayBase::external_pointer() field.
static FieldAccess ForFixedTypedArrayBaseExternalPointer();
- // Provides access to DescriptorArray::enum_cache() field.
- static FieldAccess ForDescriptorArrayEnumCache();
+ // Provides access to DescriptorArray::enum_cache_bridge() field.
+ static FieldAccess ForDescriptorArrayEnumCacheBridge();
// Provides access to DescriptorArray::enum_cache_bridge_cache() field.
static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index c3096e9974..196bf9e896 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -61,26 +61,26 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
ElementAccessInfo::ElementAccessInfo() {}
-ElementAccessInfo::ElementAccessInfo(MapList const& receiver_maps,
+ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
ElementsKind elements_kind)
: elements_kind_(elements_kind), receiver_maps_(receiver_maps) {}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(MapList const& receiver_maps,
+PropertyAccessInfo PropertyAccessInfo::NotFound(MapHandles const& receiver_maps,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(holder, receiver_maps);
}
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
- MapList const& receiver_maps, Handle<Object> constant,
+ MapHandles const& receiver_maps, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(kDataConstant, holder, constant, receiver_maps);
}
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- PropertyConstness constness, MapList const& receiver_maps,
+ PropertyConstness constness, MapHandles const& receiver_maps,
FieldIndex field_index, MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -92,7 +92,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
- MapList const& receiver_maps, Handle<Object> constant,
+ MapHandles const& receiver_maps, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
}
@@ -103,7 +103,7 @@ PropertyAccessInfo::PropertyAccessInfo()
field_type_(Type::None()) {}
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
- MapList const& receiver_maps)
+ MapHandles const& receiver_maps)
: kind_(kNotFound),
receiver_maps_(receiver_maps),
holder_(holder),
@@ -112,7 +112,7 @@ PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
- MapList const& receiver_maps)
+ MapHandles const& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
constant_(constant),
@@ -123,7 +123,8 @@ PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, MachineRepresentation field_representation,
- Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
+ Type* field_type, MaybeHandle<Map> field_map,
+ MapHandles const& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
transition_map_(transition_map),
@@ -133,7 +134,8 @@ PropertyAccessInfo::PropertyAccessInfo(
field_type_(field_type),
field_map_(field_map) {}
-bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
+bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
+ AccessMode access_mode, Zone* zone) {
if (this->kind_ != that->kind_) return false;
if (this->holder_.address() != that->holder_.address()) return false;
@@ -143,14 +145,45 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
case kDataField:
case kDataConstantField: {
- // Check if we actually access the same field.
- if (this->kind_ == that->kind_ &&
- this->transition_map_.address() == that->transition_map_.address() &&
- this->field_index_ == that->field_index_ &&
- this->field_map_.address() == that->field_map_.address() &&
- this->field_type_->Is(that->field_type_) &&
- that->field_type_->Is(this->field_type_) &&
- this->field_representation_ == that->field_representation_) {
+ // Check if we actually access the same field (we use the
+ // GetFieldAccessStubKey method here just like the ICs do
+ // since that way we only compare the relevant bits of the
+ // field indices).
+ if (this->field_index_.GetFieldAccessStubKey() ==
+ that->field_index_.GetFieldAccessStubKey()) {
+ switch (access_mode) {
+ case AccessMode::kLoad: {
+ if (this->field_representation_ != that->field_representation_) {
+ if (!IsAnyTagged(this->field_representation_) ||
+ !IsAnyTagged(that->field_representation_)) {
+ return false;
+ }
+ this->field_representation_ = MachineRepresentation::kTagged;
+ }
+ if (this->field_map_.address() != that->field_map_.address()) {
+ this->field_map_ = MaybeHandle<Map>();
+ }
+ break;
+ }
+ case AccessMode::kStore:
+ case AccessMode::kStoreInLiteral: {
+ // For stores, the field map and field representation information
+ // must match exactly, otherwise we cannot merge the stores. We
+ // also need to make sure that in case of transitioning stores,
+ // the transition targets match.
+ if (this->field_map_.address() != that->field_map_.address() ||
+ this->field_representation_ != that->field_representation_ ||
+ this->transition_map_.address() !=
+ that->transition_map_.address()) {
+ return false;
+ }
+ break;
+ }
+ }
+ // Merge the field type.
+ this->field_type_ =
+ Type::Union(this->field_type_, that->field_type_, zone);
+ // Merge the receiver maps.
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -199,37 +232,52 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
// Check if it is safe to inline element access for the {map}.
if (!CanInlineElementAccess(map)) return false;
ElementsKind const elements_kind = map->elements_kind();
- *access_info = ElementAccessInfo(MapList{map}, elements_kind);
+ *access_info = ElementAccessInfo(MapHandles{map}, elements_kind);
return true;
}
-
bool AccessInfoFactory::ComputeElementAccessInfos(
- MapHandleList const& maps, AccessMode access_mode,
+ MapHandles const& maps, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos) {
+ if (access_mode == AccessMode::kLoad) {
+ // For polymorphic loads of similar elements kinds (i.e. all tagged or all
+ // double), always use the "worst case" code without a transition. This is
+ // much faster than transitioning the elements to the worst case, trading a
+ // TransitionElementsKind for a CheckMaps, avoiding mutation of the array.
+ ElementAccessInfo access_info;
+ if (ConsolidateElementLoad(maps, &access_info)) {
+ access_infos->push_back(access_info);
+ return true;
+ }
+ }
+
// Collect possible transition targets.
- MapHandleList possible_transition_targets(maps.length());
+ MapHandles possible_transition_targets;
+ possible_transition_targets.reserve(maps.size());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
if (CanInlineElementAccess(map) &&
IsFastElementsKind(map->elements_kind()) &&
GetInitialFastElementsKind() != map->elements_kind()) {
- possible_transition_targets.Add(map);
+ possible_transition_targets.push_back(map);
}
}
}
// Separate the actual receiver maps and the possible transition sources.
- MapHandleList receiver_maps(maps.length());
- MapTransitionList transitions(maps.length());
+ MapHandles receiver_maps;
+ receiver_maps.reserve(maps.size());
+ MapTransitionList transitions(maps.size());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
- Map* transition_target =
- map->FindElementsKindTransitionedMap(&possible_transition_targets);
+ // Don't generate elements kind transitions from stable maps.
+ Map* transition_target = map->is_stable()
+ ? nullptr
+ : map->FindElementsKindTransitionedMap(
+ possible_transition_targets);
if (transition_target == nullptr) {
- receiver_maps.Add(map);
+ receiver_maps.push_back(map);
} else {
- DCHECK(!map->is_stable());
transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
@@ -335,7 +383,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
*access_info = PropertyAccessInfo::DataField(
- details.constness(), MapList{receiver_map}, field_index,
+ details.constness(), MapHandles{receiver_map}, field_index,
field_representation, field_type, field_map, holder);
return true;
} else {
@@ -349,7 +397,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (details.kind() == kData) {
DCHECK(!FLAG_track_constant_fields);
*access_info = PropertyAccessInfo::DataConstant(
- MapList{receiver_map},
+ MapHandles{receiver_map},
handle(descriptors->GetValue(number), isolate()), holder);
return true;
} else {
@@ -380,7 +428,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
*access_info = PropertyAccessInfo::AccessorConstant(
- MapList{receiver_map}, accessor, holder);
+ MapHandles{receiver_map}, accessor, holder);
return true;
}
}
@@ -423,7 +471,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// on the language mode of the load operation.
// Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
*access_info =
- PropertyAccessInfo::NotFound(MapList{receiver_map}, holder);
+ PropertyAccessInfo::NotFound(MapHandles{receiver_map}, holder);
return true;
} else {
return false;
@@ -442,7 +490,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
bool AccessInfoFactory::ComputePropertyAccessInfos(
- MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
+ MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
@@ -453,7 +501,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
// Try to merge the {access_info} with an existing one.
bool merged = false;
for (PropertyAccessInfo& other_info : *access_infos) {
- if (other_info.Merge(&access_info)) {
+ if (other_info.Merge(&access_info, access_mode, zone())) {
merged = true;
break;
}
@@ -464,6 +512,47 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
return true;
}
+namespace {
+
+Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
+ ElementsKind that_kind) {
+ if (IsHoleyElementsKind(this_kind)) {
+ that_kind = GetHoleyElementsKind(that_kind);
+ } else if (IsHoleyElementsKind(that_kind)) {
+ this_kind = GetHoleyElementsKind(this_kind);
+ }
+ if (this_kind == that_kind) return Just(this_kind);
+ if (IsFastDoubleElementsKind(that_kind) ==
+ IsFastDoubleElementsKind(this_kind)) {
+ if (IsMoreGeneralElementsKindTransition(that_kind, this_kind)) {
+ return Just(this_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(this_kind, that_kind)) {
+ return Just(that_kind);
+ }
+ }
+ return Nothing<ElementsKind>();
+}
+
+} // namespace
+
+bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
+ ElementAccessInfo* access_info) {
+ if (maps.empty()) return false;
+ InstanceType instance_type = maps.front()->instance_type();
+ ElementsKind elements_kind = maps.front()->elements_kind();
+ for (Handle<Map> map : maps) {
+ if (!CanInlineElementAccess(map) || map->instance_type() != instance_type) {
+ return false;
+ }
+ if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
+ .To(&elements_kind)) {
+ return false;
+ }
+ }
+ *access_info = ElementAccessInfo(maps, elements_kind);
+ return true;
+}
bool AccessInfoFactory::LookupSpecialFieldAccessor(
Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
@@ -497,8 +586,9 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
}
}
// Special fields are always mutable.
- *access_info = PropertyAccessInfo::DataField(
- kMutable, MapList{map}, field_index, field_representation, field_type);
+ *access_info =
+ PropertyAccessInfo::DataField(kMutable, MapHandles{map}, field_index,
+ field_representation, field_type);
return true;
}
return false;
@@ -556,8 +646,8 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
dependencies()->AssumeMapNotDeprecated(transition_map);
// Transitioning stores are never stores to constant fields.
*access_info = PropertyAccessInfo::DataField(
- kMutable, MapList{map}, field_index, field_representation, field_type,
- field_map, holder, transition_map);
+ kMutable, MapHandles{map}, field_index, field_representation,
+ field_type, field_map, holder, transition_map);
return true;
}
return false;
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 809aa83e47..7ec8deb8f0 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -10,6 +10,7 @@
#include "src/field-index.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -31,8 +32,6 @@ enum class AccessMode { kLoad, kStore, kStoreInLiteral };
std::ostream& operator<<(std::ostream&, AccessMode);
-typedef std::vector<Handle<Map>> MapList;
-
// Mapping of transition source to transition target.
typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
@@ -40,16 +39,17 @@ typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
class ElementAccessInfo final {
public:
ElementAccessInfo();
- ElementAccessInfo(MapList const& receiver_maps, ElementsKind elements_kind);
+ ElementAccessInfo(MapHandles const& receiver_maps,
+ ElementsKind elements_kind);
ElementsKind elements_kind() const { return elements_kind_; }
- MapList const& receiver_maps() const { return receiver_maps_; }
+ MapHandles const& receiver_maps() const { return receiver_maps_; }
MapTransitionList& transitions() { return transitions_; }
MapTransitionList const& transitions() const { return transitions_; }
private:
ElementsKind elements_kind_;
- MapList receiver_maps_;
+ MapHandles receiver_maps_;
MapTransitionList transitions_;
};
@@ -66,24 +66,25 @@ class PropertyAccessInfo final {
kAccessorConstant
};
- static PropertyAccessInfo NotFound(MapList const& receiver_maps,
+ static PropertyAccessInfo NotFound(MapHandles const& receiver_maps,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo DataConstant(MapList const& receiver_maps,
+ static PropertyAccessInfo DataConstant(MapHandles const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- PropertyConstness constness, MapList const& receiver_maps,
+ PropertyConstness constness, MapHandles const& receiver_maps,
FieldIndex field_index, MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
+ static PropertyAccessInfo AccessorConstant(MapHandles const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
PropertyAccessInfo();
- bool Merge(PropertyAccessInfo const* that) WARN_UNUSED_RESULT;
+ bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
+ Zone* zone) WARN_UNUSED_RESULT;
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataConstant() const { return kind() == kDataConstant; }
@@ -105,21 +106,21 @@ class PropertyAccessInfo final {
return field_representation_;
}
MaybeHandle<Map> field_map() const { return field_map_; }
- MapList const& receiver_maps() const { return receiver_maps_; }
+ MapHandles const& receiver_maps() const { return receiver_maps_; }
private:
PropertyAccessInfo(MaybeHandle<JSObject> holder,
- MapList const& receiver_maps);
+ MapHandles const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, MapList const& receiver_maps);
+ Handle<Object> constant, MapHandles const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map,
- MapList const& receiver_maps);
+ MapHandles const& receiver_maps);
Kind kind_;
- MapList receiver_maps_;
+ MapHandles receiver_maps_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
@@ -138,17 +139,18 @@ class AccessInfoFactory final {
bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
ElementAccessInfo* access_info);
- bool ComputeElementAccessInfos(MapHandleList const& maps,
- AccessMode access_mode,
+ bool ComputeElementAccessInfos(MapHandles const& maps, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos);
bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
AccessMode access_mode,
PropertyAccessInfo* access_info);
- bool ComputePropertyAccessInfos(MapHandleList const& maps, Handle<Name> name,
+ bool ComputePropertyAccessInfos(MapHandles const& maps, Handle<Name> name,
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos);
private:
+ bool ConsolidateElementLoad(MapHandles const& maps,
+ ElementAccessInfo* access_info);
bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
PropertyAccessInfo* access_info);
bool LookupTransition(Handle<Map> map, Handle<Name> name,
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index f2b7912ec5..953b6a15ea 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -347,6 +347,14 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return kNoCondition;
}
+int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
+ // If unary shuffle, table is src0 (2 d-registers).
+ if (src0.is(src1)) return 2;
+ // Binary shuffle, table is src0, src1. They must be consecutive
+ DCHECK_EQ(src0.code() + 1, src1.code());
+ return 4; // 4 d-registers.
+}
+
} // namespace
#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
@@ -496,6 +504,41 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
+#define ASSEMBLE_NEON_NARROWING_OP(dt) \
+ do { \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ if (dst.is(src0) && dst.is(src1)) { \
+ __ vqmovn(dt, dst.low(), src0); \
+ __ vmov(dst.high(), dst.low()); \
+ } else if (dst.is(src0)) { \
+ __ vqmovn(dt, dst.low(), src0); \
+ __ vqmovn(dt, dst.high(), src1); \
+ } else { \
+ __ vqmovn(dt, dst.high(), src1); \
+ __ vqmovn(dt, dst.low(), src0); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_NEON_PAIRWISE_OP(op, size) \
+ do { \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ if (dst.is(src0)) { \
+ __ op(size, dst.low(), src0.low(), src0.high()); \
+ if (dst.is(src1)) { \
+ __ vmov(dst.high(), dst.low()); \
+ } else { \
+ __ op(size, dst.high(), src1.low(), src1.high()); \
+ } \
+ } else { \
+ __ op(size, dst.high(), src1.low(), src1.high()); \
+ __ op(size, dst.low(), src0.low(), src0.high()); \
+ } \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -503,9 +546,6 @@ void CodeGenerator::AssembleDeconstructFrame() {
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
- }
__ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
@@ -1572,17 +1612,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF32x4Splat: {
- __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+ int src_code = i.InputFloatRegister(0).code();
+ __ vdup(Neon32, i.OutputSimd128Register(),
+ DwVfpRegister::from_code(src_code / 2), src_code & 0x1);
break;
}
case kArmF32x4ExtractLane: {
__ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
- kScratchReg, i.InputInt8(1));
+ i.InputInt8(1));
break;
}
case kArmF32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+ i.InputFloatRegister(2), i.InputInt8(1));
break;
}
case kArmF32x4SConvertI32x4: {
@@ -1614,6 +1656,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmF32x4AddHoriz: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Make sure we don't overwrite source data before it's used.
+ if (dst.is(src0)) {
+ __ vpadd(dst.low(), src0.low(), src0.high());
+ if (dst.is(src1)) {
+ __ vmov(dst.high(), dst.low());
+ } else {
+ __ vpadd(dst.high(), src1.low(), src1.high());
+ }
+ } else {
+ __ vpadd(dst.high(), src1.low(), src1.high());
+ __ vpadd(dst.low(), src0.low(), src0.high());
+ }
+ break;
+ }
case kArmF32x4Sub: {
__ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1634,16 +1694,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmF32x4RecipRefine: {
- __ vrecps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
- case kArmF32x4RecipSqrtRefine: {
- __ vrsqrts(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kArmF32x4Eq: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1712,6 +1762,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI32x4AddHoriz:
+ ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon32);
+ break;
case kArmI32x4Sub: {
__ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1831,25 +1884,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt4(1));
break;
}
- case kArmI16x8SConvertI32x4: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonS16, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonS16, dst.low(), src0);
- __ vqmovn(NeonS16, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonS16, dst.high(), src1);
- __ vqmovn(NeonS16, dst.low(), src0);
- }
+ case kArmI16x8SConvertI32x4:
+ ASSEMBLE_NEON_NARROWING_OP(NeonS16);
break;
- }
case kArmI16x8Add: {
__ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1860,6 +1897,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI16x8AddHoriz:
+ ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon16);
+ break;
case kArmI16x8Sub: {
__ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1922,25 +1962,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt4(1));
break;
}
- case kArmI16x8UConvertI32x4: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonU16, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonU16, dst.low(), src0);
- __ vqmovn(NeonU16, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonU16, dst.high(), src1);
- __ vqmovn(NeonU16, dst.low(), src0);
- }
+ case kArmI16x8UConvertI32x4:
+ ASSEMBLE_NEON_NARROWING_OP(NeonU16);
break;
- }
case kArmI16x8AddSaturateU: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1999,25 +2023,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kArmI8x16SConvertI16x8: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonS8, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonS8, dst.low(), src0);
- __ vqmovn(NeonS8, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonS8, dst.high(), src1);
- __ vqmovn(NeonS8, dst.low(), src0);
- }
+ case kArmI8x16SConvertI16x8:
+ ASSEMBLE_NEON_NARROWING_OP(NeonS8);
break;
- }
case kArmI8x16Add: {
__ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2079,25 +2087,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kArmI8x16UConvertI16x8: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonU8, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonU8, dst.low(), src0);
- __ vqmovn(NeonU8, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonU8, dst.high(), src1);
- __ vqmovn(NeonU8, dst.low(), src0);
- }
+ case kArmI8x16UConvertI16x8:
+ ASSEMBLE_NEON_NARROWING_OP(NeonU8);
break;
- }
case kArmI8x16AddSaturateU: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2159,6 +2151,286 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2));
break;
}
+ case kArmS32x4ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(dst.high(), src1.low()); // dst = [0, 1, 4, 5]
+ __ vtrn(Neon32, dst.low(), dst.high()); // dst = [0, 4, 1, 5]
+ break;
+ }
+ case kArmS32x4ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from ZipLeft).
+ __ vmov(dst.low(), src1.high()); // dst = [2, 3, 6, 7]
+ __ vtrn(Neon32, dst.low(), dst.high()); // dst = [2, 6, 3, 7]
+ break;
+ }
+ case kArmS32x4UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon32, dst, kScratchQuadReg); // dst = [0, 2, 4, 6]
+ break;
+ }
+ case kArmS32x4UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon32, kScratchQuadReg, dst); // dst = [1, 3, 5, 7]
+ break;
+ }
+ case kArmS32x4TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon32, dst, kScratchQuadReg); // dst = [0, 4, 2, 6]
+ break;
+ }
+ case kArmS32x4Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Check for in-place shuffles.
+ // If dst == src0 == src1, then the shuffle is unary and we only use src0.
+ if (dst.is(src0)) {
+ __ vmov(kScratchQuadReg, src0);
+ src0 = kScratchQuadReg;
+ } else if (dst.is(src1)) {
+ __ vmov(kScratchQuadReg, src1);
+ src1 = kScratchQuadReg;
+ }
+ // Perform shuffle as a vmov per lane.
+ int dst_code = dst.code() * 4;
+ int src0_code = src0.code() * 4;
+ int src1_code = src1.code() * 4;
+ int32_t shuffle = i.InputInt32(2);
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0x7;
+ int src_code = src0_code;
+ if (lane >= 4) {
+ src_code = src1_code;
+ lane &= 0x3;
+ }
+ __ VmovExtended(dst_code + i, src_code + lane);
+ shuffle >>= 8;
+ }
+ break;
+ }
+ case kArmS32x4TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon32, kScratchQuadReg, dst); // dst = [1, 5, 3, 7]
+ break;
+ }
+ case kArmS16x8ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ vmov(dst.high(), src1.low()); // dst = [0, 1, 2, 3, 8, ... 11]
+ __ vzip(Neon16, dst.low(), dst.high()); // dst = [0, 8, 1, 9, ... 11]
+ break;
+ }
+ case kArmS16x8ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(dst.low(), src1.high());
+ __ vzip(Neon16, dst.low(), dst.high()); // dst = [4, 12, 5, 13, ... 15]
+ break;
+ }
+ case kArmS16x8UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon16, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 14]
+ break;
+ }
+ case kArmS16x8UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon16, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 15]
+ break;
+ }
+ case kArmS16x8TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon16, dst, kScratchQuadReg); // dst = [0, 8, 2, 10, ... 14]
+ break;
+ }
+ case kArmS16x8TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
+ break;
+ }
+ case kArmS16x8Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ DwVfpRegister table_base = src0.low();
+ int table_size = GetVtblTableSize(src0, src1);
+ // Convert the shuffle lane masks to byte masks in kScratchQuadReg.
+ int scratch_s_base = kScratchQuadReg.code() * 4;
+ for (int j = 0; j < 2; j++) {
+ int32_t four_lanes = i.InputInt32(2 + j);
+ for (int k = 0; k < 2; k++) {
+ uint8_t w0 = (four_lanes & 0xF) * kShortSize;
+ four_lanes >>= 8;
+ uint8_t w1 = (four_lanes & 0xF) * kShortSize;
+ four_lanes >>= 8;
+ int32_t mask = w0 | ((w0 + 1) << 8) | (w1 << 16) | ((w1 + 1) << 24);
+ // Ensure byte indices are in [0, 31] so masks are never NaNs.
+ four_lanes &= 0x1F1F1F1F;
+ __ vmov(SwVfpRegister::from_code(scratch_s_base + 2 * j + k),
+ bit_cast<float>(mask));
+ }
+ }
+ NeonListOperand table(table_base, table_size);
+ if (!dst.is(src0) && !dst.is(src1)) {
+ __ vtbl(dst.low(), table, kScratchQuadReg.low());
+ __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ } else {
+ __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
+ __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
+ __ vmov(dst, kScratchQuadReg);
+ }
+ break;
+ }
+ case kArmS8x16ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(dst.high(), src1.low());
+ __ vzip(Neon8, dst.low(), dst.high()); // dst = [0, 16, 1, 17, ... 23]
+ break;
+ }
+ case kArmS8x16ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(dst.low(), src1.high());
+ __ vzip(Neon8, dst.low(), dst.high()); // dst = [8, 24, 9, 25, ... 31]
+ break;
+ }
+ case kArmS8x16UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon8, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 30]
+ break;
+ }
+ case kArmS8x16UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon8, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 31]
+ break;
+ }
+ case kArmS8x16TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon8, dst, kScratchQuadReg); // dst = [0, 16, 2, 18, ... 30]
+ break;
+ }
+ case kArmS8x16TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon8, kScratchQuadReg, dst); // dst = [1, 17, 3, 19, ... 31]
+ break;
+ }
+ case kArmS8x16Concat: {
+ __ vext(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kArmS8x16Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ DwVfpRegister table_base = src0.low();
+ int table_size = GetVtblTableSize(src0, src1);
+ // The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
+ int scratch_s_base = kScratchQuadReg.code() * 4;
+ for (int j = 0; j < 4; j++) {
+ int32_t four_lanes = i.InputInt32(2 + j);
+ // Ensure byte indices are in [0, 31] so masks are never NaNs.
+ four_lanes &= 0x1F1F1F1F;
+ __ vmov(SwVfpRegister::from_code(scratch_s_base + j),
+ bit_cast<float>(four_lanes));
+ }
+ NeonListOperand table(table_base, table_size);
+ if (!dst.is(src0) && !dst.is(src1)) {
+ __ vtbl(dst.low(), table, kScratchQuadReg.low());
+ __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ } else {
+ __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
+ __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
+ __ vmov(dst, kScratchQuadReg);
+ }
+ break;
+ }
+ case kArmS32x2Reverse: {
+ __ vrev64(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS16x4Reverse: {
+ __ vrev64(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS16x2Reverse: {
+ __ vrev32(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x8Reverse: {
+ __ vrev64(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x4Reverse: {
+ __ vrev32(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x2Reverse: {
+ __ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kArmS1x4AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
__ vpmax(NeonU32, kScratchDoubleReg, src.low(), src.high());
@@ -2508,9 +2780,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kPointerSize));
}
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
frame->AllocateSavedCalleeRegisterSlots(
@@ -2522,14 +2792,8 @@ void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(lr, fp, pp);
- // Adjust FP to point to saved FP.
- __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- } else {
- __ Push(lr, fp);
- __ mov(fp, sp);
- }
+ __ Push(lr, fp);
+ __ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
@@ -2615,9 +2879,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
@@ -2629,9 +2891,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
int pop_count = static_cast<int>(descriptor->StackParameterCount());
// Restore registers.
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ ldm(ia_w, sp, saves);
}
@@ -2780,10 +3040,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFloatRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(dst_code, src_code, kScratchReg);
+ __ VmovExtended(dst_code, src_code);
} else {
DCHECK(destination->IsFloatStackSlot());
- __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
+ __ VmovExtended(g.ToMemOperand(destination), src_code);
}
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
@@ -2810,7 +3070,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(dst_code, src, kScratchReg);
+ __ VmovExtended(dst_code, src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
QwNeonRegister dst = g.ToSimd128Register(destination);
@@ -2837,7 +3097,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ add(kScratchReg, dst.rn(), Operand(dst.offset()));
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else {
@@ -2895,14 +3154,14 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFPRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(temp.low().code(), src_code, kScratchReg);
- __ VmovExtended(src_code, dst_code, kScratchReg);
- __ VmovExtended(dst_code, temp.low().code(), kScratchReg);
+ __ VmovExtended(temp.low().code(), src_code);
+ __ VmovExtended(src_code, dst_code);
+ __ VmovExtended(dst_code, temp.low().code());
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
- __ VmovExtended(temp.low().code(), src_code, kScratchReg);
- __ VmovExtended(src_code, dst, kScratchReg);
+ __ VmovExtended(temp.low().code(), src_code);
+ __ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
}
} else {
@@ -2920,40 +3179,41 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
NeonMemOperand(kScratchReg));
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
+ Register temp_0 = kScratchReg;
+ LowDwVfpRegister temp_1 = kScratchDoubleReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand dst0 = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- // Restore the 0 register.
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
} else if (rep == MachineRepresentation::kFloat32) {
- __ vldr(kScratchDoubleReg.low(), dst);
- __ vldr(kScratchDoubleReg.high(), src);
- __ vstr(kScratchDoubleReg.low(), src);
- __ vstr(kScratchDoubleReg.high(), dst);
+ __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ vstr(temp_1.low(), src0);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- src.set_offset(src.offset() + kDoubleSize);
- dst.set_offset(dst.offset() + kDoubleSize);
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- // Restore the 0 register.
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
+ __ vldr(kScratchQuadReg.low(), dst0);
+ __ vldr(kScratchQuadReg.high(), src0);
+ __ vstr(kScratchQuadReg.low(), src0);
+ __ vstr(kScratchQuadReg.high(), dst0);
+ __ vldr(kScratchQuadReg.low(), dst1);
+ __ vldr(kScratchQuadReg.high(), src1);
+ __ vstr(kScratchQuadReg.low(), src1);
+ __ vstr(kScratchQuadReg.high(), dst1);
}
} else {
// No other combinations are possible.
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index e709a23f5c..db3e515c40 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -134,12 +134,11 @@ namespace compiler {
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
+ V(ArmF32x4AddHoriz) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Min) \
V(ArmF32x4Max) \
- V(ArmF32x4RecipRefine) \
- V(ArmF32x4RecipSqrtRefine) \
V(ArmF32x4Eq) \
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
@@ -154,6 +153,7 @@ namespace compiler {
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
+ V(ArmI32x4AddHoriz) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
@@ -181,6 +181,7 @@ namespace compiler {
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddHoriz) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSaturateS) \
V(ArmI16x8Mul) \
@@ -232,6 +233,34 @@ namespace compiler {
V(ArmS128Xor) \
V(ArmS128Not) \
V(ArmS128Select) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS16x8Shuffle) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmS8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
V(ArmS1x4AnyTrue) \
V(ArmS1x4AllTrue) \
V(ArmS1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index e6f3464bb5..549752d09e 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -118,12 +118,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
+ case kArmF32x4AddHoriz:
case kArmF32x4Sub:
case kArmF32x4Mul:
case kArmF32x4Min:
case kArmF32x4Max:
- case kArmF32x4RecipRefine:
- case kArmF32x4RecipSqrtRefine:
case kArmF32x4Eq:
case kArmF32x4Ne:
case kArmF32x4Lt:
@@ -138,6 +137,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Shl:
case kArmI32x4ShrS:
case kArmI32x4Add:
+ case kArmI32x4AddHoriz:
case kArmI32x4Sub:
case kArmI32x4Mul:
case kArmI32x4MinS:
@@ -165,6 +165,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8SConvertI32x4:
case kArmI16x8Add:
case kArmI16x8AddSaturateS:
+ case kArmI16x8AddHoriz:
case kArmI16x8Sub:
case kArmI16x8SubSaturateS:
case kArmI16x8Mul:
@@ -216,6 +217,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS128Xor:
case kArmS128Not:
case kArmS128Select:
+ case kArmS32x4ZipLeft:
+ case kArmS32x4ZipRight:
+ case kArmS32x4UnzipLeft:
+ case kArmS32x4UnzipRight:
+ case kArmS32x4TransposeLeft:
+ case kArmS32x4TransposeRight:
+ case kArmS32x4Shuffle:
+ case kArmS16x8ZipLeft:
+ case kArmS16x8ZipRight:
+ case kArmS16x8UnzipLeft:
+ case kArmS16x8UnzipRight:
+ case kArmS16x8TransposeLeft:
+ case kArmS16x8TransposeRight:
+ case kArmS16x8Shuffle:
+ case kArmS8x16ZipLeft:
+ case kArmS8x16ZipRight:
+ case kArmS8x16UnzipLeft:
+ case kArmS8x16UnzipRight:
+ case kArmS8x16TransposeLeft:
+ case kArmS8x16TransposeRight:
+ case kArmS8x16Concat:
+ case kArmS8x16Shuffle:
+ case kArmS32x2Reverse:
+ case kArmS16x4Reverse:
+ case kArmS16x2Reverse:
+ case kArmS8x8Reverse:
+ case kArmS8x4Reverse:
+ case kArmS8x2Reverse:
case kArmS1x4AnyTrue:
case kArmS1x4AllTrue:
case kArmS1x8AnyTrue:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index d69a82c608..8983c9b115 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -91,6 +91,27 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ // Swap inputs to save an instruction in the CodeGenerator for High ops.
+ if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight ||
+ opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight ||
+ opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight ||
+ opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight ||
+ opcode == kArmS8x16TransposeRight) {
+ Node* in0 = node->InputAt(0);
+ Node* in1 = node->InputAt(1);
+ node->ReplaceInput(0, in1);
+ node->ReplaceInput(1, in0);
+ }
+ // Use DefineSameAsFirst for binary ops that clobber their inputs, e.g. the
+ // NEON vzip, vuzp, and vtrn instructions.
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
// Use DefineSameAsFirst for ternary ops that clobber their first input,
@@ -398,6 +419,14 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -2414,80 +2443,81 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrS) \
V(I8x16ShrU)
-#define SIMD_BINOP_LIST(V) \
- V(F32x4Add, kArmF32x4Add) \
- V(F32x4Sub, kArmF32x4Sub) \
- V(F32x4Mul, kArmF32x4Mul) \
- V(F32x4Min, kArmF32x4Min) \
- V(F32x4Max, kArmF32x4Max) \
- V(F32x4RecipRefine, kArmF32x4RecipRefine) \
- V(F32x4RecipSqrtRefine, kArmF32x4RecipSqrtRefine) \
- V(F32x4Eq, kArmF32x4Eq) \
- V(F32x4Ne, kArmF32x4Ne) \
- V(F32x4Lt, kArmF32x4Lt) \
- V(F32x4Le, kArmF32x4Le) \
- V(I32x4Add, kArmI32x4Add) \
- V(I32x4Sub, kArmI32x4Sub) \
- V(I32x4Mul, kArmI32x4Mul) \
- V(I32x4MinS, kArmI32x4MinS) \
- V(I32x4MaxS, kArmI32x4MaxS) \
- V(I32x4Eq, kArmI32x4Eq) \
- V(I32x4Ne, kArmI32x4Ne) \
- V(I32x4LtS, kArmI32x4LtS) \
- V(I32x4LeS, kArmI32x4LeS) \
- V(I32x4MinU, kArmI32x4MinU) \
- V(I32x4MaxU, kArmI32x4MaxU) \
- V(I32x4LtU, kArmI32x4LtU) \
- V(I32x4LeU, kArmI32x4LeU) \
- V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
- V(I16x8Add, kArmI16x8Add) \
- V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
- V(I16x8Sub, kArmI16x8Sub) \
- V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
- V(I16x8Mul, kArmI16x8Mul) \
- V(I16x8MinS, kArmI16x8MinS) \
- V(I16x8MaxS, kArmI16x8MaxS) \
- V(I16x8Eq, kArmI16x8Eq) \
- V(I16x8Ne, kArmI16x8Ne) \
- V(I16x8LtS, kArmI16x8LtS) \
- V(I16x8LeS, kArmI16x8LeS) \
- V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
- V(I16x8MinU, kArmI16x8MinU) \
- V(I16x8MaxU, kArmI16x8MaxU) \
- V(I16x8LtU, kArmI16x8LtU) \
- V(I16x8LeU, kArmI16x8LeU) \
- V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
- V(I8x16Add, kArmI8x16Add) \
- V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
- V(I8x16Sub, kArmI8x16Sub) \
- V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
- V(I8x16Mul, kArmI8x16Mul) \
- V(I8x16MinS, kArmI8x16MinS) \
- V(I8x16MaxS, kArmI8x16MaxS) \
- V(I8x16Eq, kArmI8x16Eq) \
- V(I8x16Ne, kArmI8x16Ne) \
- V(I8x16LtS, kArmI8x16LtS) \
- V(I8x16LeS, kArmI8x16LeS) \
- V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
- V(I8x16MinU, kArmI8x16MinU) \
- V(I8x16MaxU, kArmI8x16MaxU) \
- V(I8x16LtU, kArmI8x16LtU) \
- V(I8x16LeU, kArmI8x16LeU) \
- V(S128And, kArmS128And) \
- V(S128Or, kArmS128Or) \
- V(S128Xor, kArmS128Xor) \
- V(S1x4And, kArmS128And) \
- V(S1x4Or, kArmS128Or) \
- V(S1x4Xor, kArmS128Xor) \
- V(S1x8And, kArmS128And) \
- V(S1x8Or, kArmS128Or) \
- V(S1x8Xor, kArmS128Xor) \
- V(S1x16And, kArmS128And) \
- V(S1x16Or, kArmS128Or) \
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kArmF32x4Add) \
+ V(F32x4AddHoriz, kArmF32x4AddHoriz) \
+ V(F32x4Sub, kArmF32x4Sub) \
+ V(F32x4Mul, kArmF32x4Mul) \
+ V(F32x4Min, kArmF32x4Min) \
+ V(F32x4Max, kArmF32x4Max) \
+ V(F32x4Eq, kArmF32x4Eq) \
+ V(F32x4Ne, kArmF32x4Ne) \
+ V(F32x4Lt, kArmF32x4Lt) \
+ V(F32x4Le, kArmF32x4Le) \
+ V(I32x4Add, kArmI32x4Add) \
+ V(I32x4AddHoriz, kArmI32x4AddHoriz) \
+ V(I32x4Sub, kArmI32x4Sub) \
+ V(I32x4Mul, kArmI32x4Mul) \
+ V(I32x4MinS, kArmI32x4MinS) \
+ V(I32x4MaxS, kArmI32x4MaxS) \
+ V(I32x4Eq, kArmI32x4Eq) \
+ V(I32x4Ne, kArmI32x4Ne) \
+ V(I32x4LtS, kArmI32x4LtS) \
+ V(I32x4LeS, kArmI32x4LeS) \
+ V(I32x4MinU, kArmI32x4MinU) \
+ V(I32x4MaxU, kArmI32x4MaxU) \
+ V(I32x4LtU, kArmI32x4LtU) \
+ V(I32x4LeU, kArmI32x4LeU) \
+ V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
+ V(I16x8Add, kArmI16x8Add) \
+ V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
+ V(I16x8AddHoriz, kArmI16x8AddHoriz) \
+ V(I16x8Sub, kArmI16x8Sub) \
+ V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
+ V(I16x8Mul, kArmI16x8Mul) \
+ V(I16x8MinS, kArmI16x8MinS) \
+ V(I16x8MaxS, kArmI16x8MaxS) \
+ V(I16x8Eq, kArmI16x8Eq) \
+ V(I16x8Ne, kArmI16x8Ne) \
+ V(I16x8LtS, kArmI16x8LtS) \
+ V(I16x8LeS, kArmI16x8LeS) \
+ V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
+ V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
+ V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
+ V(I16x8MinU, kArmI16x8MinU) \
+ V(I16x8MaxU, kArmI16x8MaxU) \
+ V(I16x8LtU, kArmI16x8LtU) \
+ V(I16x8LeU, kArmI16x8LeU) \
+ V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
+ V(I8x16Add, kArmI8x16Add) \
+ V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
+ V(I8x16Sub, kArmI8x16Sub) \
+ V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
+ V(I8x16Mul, kArmI8x16Mul) \
+ V(I8x16MinS, kArmI8x16MinS) \
+ V(I8x16MaxS, kArmI8x16MaxS) \
+ V(I8x16Eq, kArmI8x16Eq) \
+ V(I8x16Ne, kArmI8x16Ne) \
+ V(I8x16LtS, kArmI8x16LtS) \
+ V(I8x16LeS, kArmI8x16LeS) \
+ V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
+ V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
+ V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
+ V(I8x16MinU, kArmI8x16MinU) \
+ V(I8x16MaxU, kArmI8x16MaxU) \
+ V(I8x16LtU, kArmI8x16LtU) \
+ V(I8x16LeU, kArmI8x16LeU) \
+ V(S128And, kArmS128And) \
+ V(S128Or, kArmS128Or) \
+ V(S128Xor, kArmS128Xor) \
+ V(S1x4And, kArmS128And) \
+ V(S1x4Or, kArmS128Or) \
+ V(S1x4Xor, kArmS128Xor) \
+ V(S1x8And, kArmS128And) \
+ V(S1x8Or, kArmS128Or) \
+ V(S1x8Xor, kArmS128Xor) \
+ V(S1x16And, kArmS128And) \
+ V(S1x16Or, kArmS128Or) \
V(S1x16Xor, kArmS128Xor)
#define SIMD_VISIT_SPLAT(Type) \
@@ -2547,6 +2577,216 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
#undef SIMD_VISIT_SELECT_OP
+namespace {
+template <int LANES>
+struct ShuffleEntry {
+ uint8_t shuffle[LANES];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry<4> arch_s32x4_shuffles[] = {
+ {{0, 4, 1, 5}, kArmS32x4ZipLeft},
+ {{2, 6, 3, 7}, kArmS32x4ZipRight},
+ {{0, 2, 4, 6}, kArmS32x4UnzipLeft},
+ {{1, 3, 5, 7}, kArmS32x4UnzipRight},
+ {{0, 4, 2, 6}, kArmS32x4TransposeLeft},
+ {{1, 5, 3, 7}, kArmS32x4TransposeRight},
+ {{1, 0, 3, 2}, kArmS32x2Reverse}};
+
+static const ShuffleEntry<8> arch_s16x8_shuffles[] = {
+ {{0, 8, 1, 9, 2, 10, 3, 11}, kArmS16x8ZipLeft},
+ {{4, 12, 5, 13, 6, 14, 7, 15}, kArmS16x8ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14}, kArmS16x8UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15}, kArmS16x8UnzipRight},
+ {{0, 8, 2, 10, 4, 12, 6, 14}, kArmS16x8TransposeLeft},
+ {{1, 9, 3, 11, 5, 13, 7, 15}, kArmS16x8TransposeRight},
+ {{3, 2, 1, 0, 7, 6, 5, 4}, kArmS16x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6}, kArmS16x2Reverse}};
+
+static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kArmS8x16ZipLeft},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kArmS8x16ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kArmS8x16UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kArmS8x16UnzipRight},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kArmS8x16TransposeLeft},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kArmS8x16TransposeRight},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArmS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
+
+// Use a non-shuffle opcode to signal no match.
+static const ArchOpcode kNoShuffle = kArmS128Not;
+
+template <int LANES>
+ArchOpcode TryMatchArchShuffle(const uint8_t* shuffle,
+ const ShuffleEntry<LANES>* table,
+ size_t num_entries, uint8_t mask) {
+ for (size_t i = 0; i < num_entries; i++) {
+ const ShuffleEntry<LANES>& entry = table[i];
+ int j = 0;
+ for (; j < LANES; j++) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == LANES) return entry.opcode;
+ }
+ return kNoShuffle;
+}
+
+// Returns the bias if shuffle is a concatenation, 0 otherwise.
+template <int LANES>
+uint8_t TryMatchConcat(const uint8_t* shuffle, uint8_t mask) {
+ uint8_t start = shuffle[0];
+ int i = 1;
+ for (; i < LANES - start; i++) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return 0;
+ }
+ uint8_t wrap = LANES;
+ for (; i < LANES; i++, wrap++) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return 0;
+ }
+ return start;
+}
+
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
+ int num_lanes) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = num_lanes - 1;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < num_lanes; i++) {
+ if (shuffle[i] < num_lanes) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = num_lanes - 1;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = num_lanes - 1;
+ }
+ }
+ return mask;
+}
+
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; i--) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
+}
+
+void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
+ InstructionOperand* src0, InstructionOperand* src1) {
+ if (input0 == input1) {
+ // Unary, any q-register can be the table.
+ *src0 = *src1 = g->UseRegister(input0);
+ } else {
+ // Binary, table registers must be consecutive.
+ *src0 = g->UseFixed(input0, q0);
+ *src1 = g->UseFixed(input1, q1);
+ }
+}
+
+} // namespace
+
+void InstructionSelector::VisitS32x4Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 4);
+ ArchOpcode opcode = TryMatchArchShuffle<4>(
+ shuffle, arch_s32x4_shuffles, arraysize(arch_s32x4_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ uint8_t lanes = TryMatchConcat<4>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(lanes * 4));
+ return;
+ }
+ Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)));
+}
+
+void InstructionSelector::VisitS16x8Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 8);
+ ArchOpcode opcode = TryMatchArchShuffle<8>(
+ shuffle, arch_s16x8_shuffles, arraysize(arch_s16x8_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t lanes = TryMatchConcat<8>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(lanes * 2));
+ return;
+ }
+ // Code generator uses vtbl; arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArmS16x8Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)));
+}
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 16);
+ ArchOpcode opcode = TryMatchArchShuffle<16>(
+ shuffle, arch_s8x16_shuffles, arraysize(arch_s8x16_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t lanes = TryMatchConcat<16>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(lanes));
+ return;
+ }
+ // Code generator uses vtbl; arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArmS8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
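Illustrative sketch (not part of the patch): when no NEON zip/unzip/transpose/concat pattern matches, the selector falls back to a generic shuffle and packs four lane indices into each 32-bit immediate, with lane 0 in the least-significant byte. A small stand-alone C++ demo of that packing:

    #include <cassert>
    #include <cstdint>

    // Pack four (masked) lane indices into one immediate, lane 0 in the low byte.
    int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
      int32_t result = 0;
      for (int i = 3; i >= 0; i--) {
        result <<= 8;
        result |= shuffle[i] & mask;
      }
      return result;
    }

    int main() {
      const uint8_t swap_adjacent[4] = {1, 0, 3, 2};  // swap adjacent lanes
      assert(Pack4Lanes(swap_adjacent, 0xff) == 0x02030001);
      return 0;
    }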
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index a72070a06d..88311c35e8 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -772,8 +772,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchPrepareCallCFunction:
// We don't need kArchPrepareCallCFunction on arm64 as the instruction
- // selector already perform a Claim to reserve space on the stack and
- // guarantee correct alignment of stack pointer.
+ // selector has already performed a Claim to reserve space on the stack.
+ // Frame alignment is always 16 bytes, and the stack pointer is already
+ // 16-byte aligned, therefore we do not need to align the stack pointer
+ // by an unknown value, and it is safe to continue accessing the frame
+ // via the stack pointer.
UNREACHABLE();
break;
case kArchPrepareTailCall:
@@ -788,9 +791,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
- // CallCFunction only supports register arguments so we never need to call
- // frame()->ClearOutgoingParameterSlots() here.
- DCHECK(frame_access_state()->sp_delta() == 0);
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -1228,14 +1230,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register prev = __ StackPointer();
if (prev.Is(jssp)) {
// TODO(titzer): make this a macro-assembler method.
- // Align the CSP and store the previous JSSP on the stack.
+ // Align the CSP and store the previous JSSP on the stack. We do not
+ // need to modify the SP delta here, as we will continue to access the
+ // frame via JSSP.
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
+ // TODO(arm64): Storing JSSP on the stack is redundant when calling a C
+ // function, as JSSP is callee-saved (we still need to do this when
+ // calling a code object that uses the CSP as the stack pointer). See
+ // the code generation for kArchCallCodeObject vs. kArchCallCFunction
+ // (the latter does not restore CSP/JSSP).
+ // MacroAssembler::CallCFunction() (safely) drops this extra slot
+ // anyway.
int sp_alignment = __ ActivationFrameAlignment();
__ Sub(tmp, jssp, kPointerSize);
- __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
- __ Mov(csp, tmp);
+ __ Bic(csp, tmp, sp_alignment - 1);
__ Str(jssp, MemOperand(csp));
if (count > 0) {
__ SetStackPointer(csp);
@@ -1259,7 +1269,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (count > 0) {
int even = RoundUp(count, 2);
__ Sub(jssp, csp, count * kPointerSize);
+ // We must also update CSP to maintain stack consistency:
__ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
+ __ AssertStackConsistency();
frame_access_state()->IncreaseSPDelta(even);
} else {
__ Mov(jssp, csp);
@@ -1994,6 +2006,53 @@ void CodeGenerator::AssembleConstructFrame() {
osr_pc_offset_ = __ pc_offset();
shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
+
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ UseScratchRegisterScope scope(masm());
+ Register scratch = scope.AcquireX();
+ __ Mov(
+ scratch,
+ Operand(ExternalReference::address_of_real_stack_limit(isolate())));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ Cmp(__ StackPointer(), scratch);
+ __ B(cs, &done);
+ }
+
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ // There is no need to leave the frame, we will not return from the
+ // runtime call.
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ DCHECK(__ StackPointer().Is(csp));
+ __ SetStackPointer(jssp);
+ __ AssertStackConsistency();
+ // Initialize the jssp because it is required for the runtime call.
+ __ Mov(jssp, csp);
+ __ Move(cp, Smi::kZero);
+ __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ // We come from WebAssembly, there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ Brk(0);
+ }
+ __ SetStackPointer(csp);
+ __ AssertStackConsistency();
+ __ bind(&done);
+ }
+
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, e.g. claiming the extra slot that
// other platforms explicitly push for STUB frames and frames recording
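Illustrative sketch (not part of the patch): the guard added above checks, before the frame is built, that the stack pointer is still above the real stack limit plus the whole frame, and skips the comparison entirely when the frame alone exceeds the configured stack size so the addition cannot overflow. The shape of that check as stand-alone C++, with illustrative names:

    #include <cstddef>
    #include <cstdint>

    // Returns true if a frame of frame_size bytes still fits between sp and the
    // real stack limit; callers treat a false result as a stack overflow.
    bool FrameFitsOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                          size_t frame_size, size_t max_stack_size) {
      // A frame larger than the whole stack can never fit, and skipping the
      // comparison also avoids overflowing real_stack_limit + frame_size.
      if (frame_size >= max_stack_size) return false;
      return sp >= real_stack_limit + frame_size;
    }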
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index a471a2b8b3..0e9fd0ca2b 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -527,6 +527,15 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
ImmediateMode immediate_mode, MachineRepresentation rep,
Node* output = nullptr) {
@@ -919,7 +928,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t mask = m.right().Value();
uint32_t mask_width = base::bits::CountPopulation32(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
- if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ if ((mask_width != 0) && (mask_width != 32) &&
+ (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
@@ -1775,7 +1785,8 @@ void InstructionSelector::EmitPrepareArguments(
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
- // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
+ // ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
+ // CSP and JSSP.
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
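Illustrative sketch (not part of the patch): the extra (mask_width != 32) term above keeps the all-ones mask out of the bitfield-extract path. The test the selector performs, sketched with C++20 <bit> helpers rather than V8's base::bits:

    #include <bit>
    #include <cstdint>

    // A 32-bit AND mask can be handled as a low-bit field extract only if it is
    // a contiguous run of low bits that is neither empty nor the full 32 bits.
    bool IsLowBitfieldMask(uint32_t mask) {
      int width = std::popcount(mask);             // mask_width
      int leading_zeros = std::countl_zero(mask);  // mask_msb
      return width != 0 && width != 32 && leading_zeros + width == 32;
    }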
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index b92a205600..fd2209ed53 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -268,9 +268,9 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
LoopBuilder* control_;
};
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop)
: isolate_(info->isolate()),
local_zone_(local_zone),
@@ -1692,7 +1692,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VisitForValues(args);
// Create node to perform the function call.
- float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
+ CallFrequency frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call =
javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
@@ -1720,7 +1720,7 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
environment()->Push(environment()->Peek(args->length()));
// Create node to perform the construct call.
- float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
+ CallFrequency frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
const Operator* call =
javascript()->Construct(args->length() + 2, frequency, feedback);
@@ -2120,7 +2120,8 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
VisitForValue(property->key());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- value = NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
+ value = NewNode(javascript()->DeleteProperty(), object, key, mode);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
@@ -2239,12 +2240,15 @@ void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
-float AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
- if (slot.IsInvalid()) return 0.0f;
+CallFrequency AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
+ if (invocation_frequency_.IsUnknown() || slot.IsInvalid()) {
+ return CallFrequency();
+ }
Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector(),
isolate());
CallICNexus nexus(feedback_vector, slot);
- return nexus.ComputeCallFrequency() * invocation_frequency_;
+ return CallFrequency(nexus.ComputeCallFrequency() *
+ invocation_frequency_.value());
}
Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
@@ -2453,8 +2457,9 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->DeleteProperty(language_mode());
- Node* result = NewNode(op, global, name);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
+ const Operator* op = javascript()->DeleteProperty();
+ Node* result = NewNode(op, global, name, mode);
PrepareFrameState(result, bailout_id, combine);
return result;
}
@@ -3014,8 +3019,9 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
// Set the control and effect to the OSR loop entry.
Node* osr_loop_entry = graph->NewNode(builder_->common()->OsrLoopEntry(),
graph->start(), graph->start());
+ Node* effect = osr_loop_entry;
UpdateControlDependency(osr_loop_entry);
- UpdateEffectDependency(osr_loop_entry);
+ UpdateEffectDependency(effect);
// Set OSR values.
for (int i = 0; i < size; ++i) {
@@ -3028,30 +3034,11 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
contexts()->back() = graph->NewNode(op_inner, osr_loop_entry);
- // Create a checkpoint.
- Node* frame_state = Checkpoint(builder_->info()->osr_ast_id());
- Node* checkpoint = graph->NewNode(common()->Checkpoint(), frame_state,
- osr_loop_entry, osr_loop_entry);
- UpdateEffectDependency(checkpoint);
-
- // Create the OSR guard nodes.
- const Operator* guard_op =
- builder_->info()->is_deoptimization_enabled()
- ? builder_->common()->OsrGuard(OsrGuardType::kUninitialized)
- : builder_->common()->OsrGuard(OsrGuardType::kAny);
- Node* effect = checkpoint;
- for (int i = 0; i < size; ++i) {
- values()->at(i) = effect =
- graph->NewNode(guard_op, values()->at(i), effect, osr_loop_entry);
- }
- contexts()->back() = effect =
- graph->NewNode(guard_op, contexts()->back(), effect, osr_loop_entry);
-
// The innermost context is the OSR value, and the outer contexts are
// reconstructed by dynamically walking up the context chain.
const Operator* load_op =
builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
- Node* osr_context = effect = contexts()->back();
+ Node* osr_context = contexts()->back();
int last = static_cast<int>(contexts()->size() - 1);
for (int i = last - 1; i >= 0; i--) {
osr_context = effect = graph->NewNode(load_op, osr_context, effect);
@@ -3172,7 +3159,7 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
+ CallFrequency invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
SourcePositionTable* source_positions, int inlining_id)
: AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
loop_assignment),
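Illustrative sketch (not part of the patch): the switch from a raw float to CallFrequency lets an unknown frequency be represented explicitly instead of overloading 0.0f. A minimal sketch of such a wrapper (not the exact v8 definition):

    #include <cassert>
    #include <cmath>
    #include <limits>

    class CallFrequency {
     public:
      CallFrequency() : value_(std::numeric_limits<float>::quiet_NaN()) {}
      explicit CallFrequency(float value) : value_(value) { assert(value >= 0.0f); }

      bool IsUnknown() const { return std::isnan(value_); }
      float value() const {
        assert(!IsUnknown());
        return value_;
      }

     private:
      float value_;
    };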
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 4fd3f35e78..1d0ba3a9c2 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -37,7 +37,7 @@ class Node;
class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop_assignment = nullptr);
virtual ~AstGraphBuilder() {}
@@ -78,7 +78,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- float const invocation_frequency_;
+ CallFrequency const invocation_frequency_;
Environment* environment_;
AstContext* ast_context_;
@@ -249,7 +249,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
VectorSlotPair CreateVectorSlotPair(FeedbackSlot slot) const;
// Computes the frequency for JSCall and JSConstruct nodes.
- float ComputeCallFrequency(FeedbackSlot slot) const;
+ CallFrequency ComputeCallFrequency(FeedbackSlot slot) const;
// ===========================================================================
// The following build methods all generate graph fragments and return one
@@ -559,7 +559,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
class AstGraphBuilderWithPositions final : public AstGraphBuilder {
public:
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop_assignment,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 6d8afe1744..e531e75b8c 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -90,6 +90,7 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
loop_end_index_queue_(zone),
end_to_header_(zone),
header_to_info_(zone),
+ osr_entry_point_(-1),
liveness_map_(bytecode_array->length(), zone) {}
namespace {
@@ -187,6 +188,10 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
if (Bytecodes::IsForwardJump(bytecode)) {
int target_offset = accessor.GetJumpTargetOffset();
out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+ } else if (Bytecodes::IsSwitch(bytecode)) {
+ for (const auto& entry : accessor.GetJumpTableTargetOffsets()) {
+ out_liveness.Union(*liveness_map.GetInLiveness(entry.target_offset));
+ }
}
// Update from next bytecode (unless there isn't one or this is an
@@ -256,7 +261,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, so set the loop end accordingly.
int loop_end = current_offset + iterator.current_bytecode_size();
- PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+ int loop_header = iterator.GetJumpTargetOffset();
+ PushLoop(loop_header, loop_end);
// Normally prefixed bytecodes are treated as if the prefix's offset was
// the actual bytecode's offset. However, the OSR id is the offset of the
@@ -270,9 +276,10 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK(!is_osr_loop ||
iterator.OffsetWithinBytecode(osr_loop_end_offset));
- // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
- // need to make sure to considered everything to be assigned.
if (is_osr_loop) {
+ osr_entry_point_ = loop_header;
+ // OSR "assigns" everything to OSR values on entry into an OSR loop, so
+ // we need to make sure to consider everything to be assigned.
loop_stack_.top().loop_info->assignments().AddAll();
}
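Illustrative sketch (not part of the patch): for switch bytecodes the out-liveness is the union of the in-liveness of every jump-table target, exactly as a forward jump unions in the liveness of its single target. Sketched with a fixed-width bitset standing in for BytecodeLivenessState:

    #include <bitset>
    #include <vector>

    using Liveness = std::bitset<128>;  // stand-in for BytecodeLivenessState

    // Union the in-liveness of every switch target into the out-liveness of the
    // switch bytecode itself.
    void UnionSwitchTargets(Liveness* out_liveness,
                            const std::vector<Liveness>& target_in_liveness) {
      for (const Liveness& in : target_in_liveness) *out_liveness |= in;
    }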
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index ad93f8a652..63dfa3107c 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -80,6 +80,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
+ // True if the current analysis has an OSR entry point.
+ bool HasOSREntryPoint() const { return osr_entry_point_ != -1; }
+ // True if {offset} is the OSR entry loop header.
+ bool IsOSREntryPoint(int offset) const { return osr_entry_point_ == offset; }
+
// Gets the in-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
@@ -113,6 +118,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
+ int osr_entry_point_;
BytecodeLivenessMap liveness_map_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index dcaed97481..5bb9a8e976 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -82,9 +82,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
void UpdateStateValues(Node** state_values, Node** values, int count);
- void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
- const BitVector* liveness,
- int liveness_offset);
+ Node* GetStateValuesFromCache(Node** values, int count,
+ const BitVector* liveness, int liveness_offset);
int RegisterToValuesIndex(interpreter::Register the_register) const;
@@ -105,12 +104,22 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* effect_dependency_;
NodeVector values_;
Node* parameters_state_values_;
- Node* registers_state_values_;
- Node* accumulator_state_values_;
int register_base_;
int accumulator_base_;
};
+// A helper for creating a temporary sub-environment for simple branches.
+struct BytecodeGraphBuilder::SubEnvironment final {
+ public:
+ explicit SubEnvironment(BytecodeGraphBuilder* builder)
+ : builder_(builder), parent_(builder->environment()->Copy()) {}
+
+ ~SubEnvironment() { builder_->set_environment(parent_); }
+
+ private:
+ BytecodeGraphBuilder* builder_;
+ BytecodeGraphBuilder::Environment* parent_;
+};
// Issues:
// - Scopes - intimately tied to AST. Need to eval what is needed.
@@ -127,9 +136,7 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
values_(builder->local_zone()),
- parameters_state_values_(nullptr),
- registers_state_values_(nullptr),
- accumulator_state_values_(nullptr) {
+ parameters_state_values_(nullptr) {
// The layout of values_ is:
//
// [receiver] [parameters] [registers] [accumulator]
@@ -165,9 +172,7 @@ BytecodeGraphBuilder::Environment::Environment(
control_dependency_(other->control_dependency_),
effect_dependency_(other->effect_dependency_),
values_(other->zone()),
- parameters_state_values_(nullptr),
- registers_state_values_(nullptr),
- accumulator_state_values_(nullptr),
+ parameters_state_values_(other->parameters_state_values_),
register_base_(other->register_base_),
accumulator_base_(other->accumulator_base_) {
values_ = other->values_;
@@ -325,24 +330,6 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
}
-
- BailoutId loop_id(builder_->bytecode_iterator().current_offset());
- Node* frame_state =
- Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
- Node* checkpoint =
- graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
- UpdateEffectDependency(checkpoint);
-
- // Create the OSR guard nodes.
- const Operator* guard_op = common()->OsrGuard(OsrGuardType::kUninitialized);
- Node* effect = checkpoint;
- for (int i = 0; i < size; i++) {
- values()->at(i) = effect =
- graph()->NewNode(guard_op, values()->at(i), effect, entry);
- }
- Node* context = effect = graph()->NewNode(guard_op, Context(), effect, entry);
- SetContext(context);
- UpdateEffectDependency(effect);
}
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
@@ -411,10 +398,9 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
-void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
- Node** state_values, Node** values, int count, const BitVector* liveness,
- int liveness_offset) {
- *state_values = builder_->state_values_cache_.GetNodeForValues(
+Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
+ Node** values, int count, const BitVector* liveness, int liveness_offset) {
+ return builder_->state_values_cache_.GetNodeForValues(
values, static_cast<size_t>(count), liveness, liveness_offset);
}
@@ -424,37 +410,27 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
// to match the parameter count.
- UpdateStateValuesWithCache(&parameters_state_values_, &values()->at(0),
- parameter_count(), nullptr, 0);
+ parameters_state_values_ = GetStateValuesFromCache(
+ &values()->at(0), parameter_count(), nullptr, 0);
} else {
UpdateStateValues(&parameters_state_values_, &values()->at(0),
parameter_count());
}
- UpdateStateValuesWithCache(&registers_state_values_,
- &values()->at(register_base()), register_count(),
- liveness ? &liveness->bit_vector() : nullptr, 0);
+ Node* registers_state_values =
+ GetStateValuesFromCache(&values()->at(register_base()), register_count(),
+ liveness ? &liveness->bit_vector() : nullptr, 0);
bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
- if (parameter_count() == 1 && accumulator_is_live &&
- values()->at(accumulator_base()) == values()->at(0)) {
- // Re-use the parameter state values if there happens to only be one
- // parameter and the accumulator is live and holds that parameter's value.
- accumulator_state_values_ = parameters_state_values_;
- } else {
- // Otherwise, use the state values cache to hopefully re-use local register
- // state values (if there is only one local register), or at the very least
- // re-use previous accumulator state values.
- UpdateStateValuesWithCache(
- &accumulator_state_values_, &values()->at(accumulator_base()), 1,
- liveness ? &liveness->bit_vector() : nullptr, register_count());
- }
+ Node* accumulator_state_value =
+ accumulator_is_live ? values()->at(accumulator_base())
+ : builder()->jsgraph()->OptimizedOutConstant();
const Operator* op = common()->FrameState(
bailout_id, combine, builder()->frame_state_function_info());
Node* result = graph()->NewNode(
- op, parameters_state_values_, registers_state_values_,
- accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
+ op, parameters_state_values_, registers_state_values,
+ accumulator_state_value, Context(), builder()->GetFunctionClosure(),
builder()->graph()->start());
return result;
@@ -463,7 +439,7 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
BytecodeGraphBuilder::BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph, CallFrequency invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
JSTypeHintLowering::Flags flags)
: local_zone_(local_zone),
@@ -482,7 +458,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_analysis_(nullptr),
environment_(nullptr),
osr_ast_id_(osr_ast_id),
- osr_loop_offset_(-1),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
@@ -638,7 +613,7 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
SourcePositionTableIterator source_position_iterator(
- bytecode_array()->source_position_table());
+ handle(bytecode_array()->SourcePositionTable()));
if (FLAG_trace_environment_liveness) {
OFStream of(stdout);
@@ -907,9 +882,10 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
jsgraph()->TheHoleConstant());
NewBranch(check_no_extension);
- Environment* true_environment = environment()->Copy();
{
+ SubEnvironment sub_environment(this);
+
NewIfFalse();
// If there is an extension, merge into the slow path.
if (slow_environment == nullptr) {
@@ -920,12 +896,9 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
}
}
- {
- set_environment(true_environment);
- NewIfTrue();
- // Do nothing on if there is no extension, eventually falling through to
- // the fast path.
- }
+ NewIfTrue();
+ // Do nothing if there is no extension, eventually falling through to
+ // the fast path.
}
// The depth can be zero, in which case no slow-path checks are built, and the
@@ -1398,7 +1371,7 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
- float const frequency = ComputeCallFrequency(slot_id);
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->Call(arg_count, frequency, feedback,
receiver_mode, tail_call_mode);
Node* value = ProcessCallArguments(call, args, static_cast<int>(arg_count));
@@ -1680,7 +1653,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
Node* new_target = environment()->LookupAccumulator();
Node* callee = environment()->LookupRegister(callee_reg);
- float const frequency = ComputeCallFrequency(slot_id);
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->Construct(
static_cast<uint32_t>(reg_count + 2), frequency, feedback);
Node* value =
@@ -1748,9 +1721,11 @@ CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
return nexus.GetCompareOperationFeedback();
}
-float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+ if (invocation_frequency_.IsUnknown()) return CallFrequency();
CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
- return nexus.ComputeCallFrequency() * invocation_frequency_;
+ return CallFrequency(nexus.ComputeCallFrequency() *
+ invocation_frequency_.value());
}
void BytecodeGraphBuilder::VisitAdd() {
@@ -1920,8 +1895,8 @@ void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node =
- NewNode(javascript()->DeleteProperty(language_mode), object, key);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode));
+ Node* node = NewNode(javascript()->DeleteProperty(), object, key, mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2190,6 +2165,27 @@ void BytecodeGraphBuilder::VisitJumpIfNotUndefinedConstant() {
void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
+void BytecodeGraphBuilder::BuildSwitchOnSmi(Node* condition) {
+ interpreter::JumpTableTargetOffsets offsets =
+ bytecode_iterator().GetJumpTableTargetOffsets();
+
+ NewSwitch(condition, offsets.size() + 1);
+ for (const auto& entry : offsets) {
+ SubEnvironment sub_environment(this);
+ NewIfValue(entry.case_value);
+ MergeIntoSuccessorEnvironment(entry.target_offset);
+ }
+ NewIfDefault();
+}
+
+void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
+ PrepareEagerCheckpoint();
+
+ Node* acc = environment()->LookupAccumulator();
+ Node* acc_smi = NewNode(simplified()->CheckSmi(), acc);
+ BuildSwitchOnSmi(acc_smi);
+}
+
void BytecodeGraphBuilder::VisitStackCheck() {
PrepareEagerCheckpoint();
Node* node = NewNode(javascript()->StackCheck());
@@ -2300,8 +2296,6 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
- PrepareEagerCheckpoint();
-
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
@@ -2315,7 +2309,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* state =
NewNode(javascript()->GeneratorRestoreContinuation(), generator);
- environment()->BindAccumulator(state, Environment::kAttachFrameState);
+ environment()->BindAccumulator(state);
}
void BytecodeGraphBuilder::VisitWide() {
@@ -2387,7 +2381,7 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
- if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
+ if (bytecode_analysis()->IsOSREntryPoint(current_offset)) {
// For OSR add a special {OsrLoopEntry} node into the current loop header.
// It will be turned into a usable entry by the OSR deconstruction.
Environment* osr_env = environment()->Copy();
@@ -2397,15 +2391,10 @@ void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
}
void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
- if (!osr_ast_id_.IsNone()) {
+ if (bytecode_analysis()->HasOSREntryPoint()) {
// For OSR add an {OsrNormalEntry} as the top-level environment start.
// It will be replaced with {Dead} by the OSR deconstruction.
NewNode(common()->OsrNormalEntry());
- // Translate the offset of the jump instruction to the jump target offset of
- // that instruction so that the derived BailoutId points to the loop header.
- osr_loop_offset_ =
- bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
- DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
}
}
@@ -2440,19 +2429,21 @@ void BytecodeGraphBuilder::BuildJump() {
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition);
- Environment* if_false_environment = environment()->Copy();
- NewIfTrue();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- set_environment(if_false_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfTrue();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfFalse();
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
NewBranch(condition);
- Environment* if_true_environment = environment()->Copy();
- NewIfFalse();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- set_environment(if_true_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfTrue();
}
@@ -2472,24 +2463,26 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
void BytecodeGraphBuilder::BuildJumpIfFalse() {
NewBranch(environment()->LookupAccumulator());
- Environment* if_true_environment = environment()->Copy();
- environment()->BindAccumulator(jsgraph()->FalseConstant());
- NewIfFalse();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- if_true_environment->BindAccumulator(jsgraph()->TrueConstant());
- set_environment(if_true_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ environment()->BindAccumulator(jsgraph()->FalseConstant());
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfTrue();
+ environment()->BindAccumulator(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
NewBranch(environment()->LookupAccumulator());
- Environment* if_false_environment = environment()->Copy();
- environment()->BindAccumulator(jsgraph()->TrueConstant());
- NewIfTrue();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- if_false_environment->BindAccumulator(jsgraph()->FalseConstant());
- set_environment(if_false_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfTrue();
+ environment()->BindAccumulator(jsgraph()->TrueConstant());
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfFalse();
+ environment()->BindAccumulator(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
@@ -2552,7 +2545,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceLoadNamedOperation(
@@ -2571,7 +2564,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceLoadKeyedOperation(
@@ -2590,7 +2583,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceStoreNamedOperation(
@@ -2609,7 +2602,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceStoreKeyedOperation(
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 809a995dff..b963c6a197 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -29,7 +29,7 @@ class BytecodeGraphBuilder {
BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph, CallFrequency invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags);
@@ -39,6 +39,7 @@ class BytecodeGraphBuilder {
private:
class Environment;
+ struct SubEnvironment;
void VisitBytecodes(bool stack_check);
@@ -90,11 +91,16 @@ class BytecodeGraphBuilder {
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewIfValue(int32_t value) { return NewNode(common()->IfValue(value)); }
+ Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
return NewNode(common()->Branch(hint), condition);
}
+ Node* NewSwitch(Node* condition, int control_output_count) {
+ return NewNode(common()->Switch(control_output_count), condition);
+ }
// Creates a new Phi node having {count} input values.
Node* NewPhi(int count, Node* input, Node* control);
@@ -206,7 +212,7 @@ class BytecodeGraphBuilder {
// Helper function to compute call frequency from the recorded type
// feedback.
- float ComputeCallFrequency(int slot_id) const;
+ CallFrequency ComputeCallFrequency(int slot_id) const;
// Control flow plumbing.
void BuildJump();
@@ -221,6 +227,8 @@ class BytecodeGraphBuilder {
void BuildJumpIfNotHole();
void BuildJumpIfJSReceiver();
+ void BuildSwitchOnSmi(Node* condition);
+
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
void BuildLoopHeaderEnvironment(int current_offset);
@@ -315,7 +323,7 @@ class BytecodeGraphBuilder {
Zone* local_zone_;
JSGraph* jsgraph_;
- float const invocation_frequency_;
+ CallFrequency const invocation_frequency_;
Handle<BytecodeArray> bytecode_array_;
Handle<HandlerTable> exception_handler_table_;
Handle<FeedbackVector> feedback_vector_;
@@ -325,7 +333,6 @@ class BytecodeGraphBuilder {
const BytecodeAnalysis* bytecode_analysis_;
Environment* environment_;
BailoutId osr_ast_id_;
- int osr_loop_offset_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index e4795ad0b2..d8fc12624d 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -155,6 +155,8 @@ LinkageLocation regloc(Register reg, MachineType type) {
// General code uses the above configuration data.
CallDescriptor* Linkage::GetSimplifiedCDescriptor(
Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
+ DCHECK_LE(msig->parameter_count(), static_cast<size_t>(kMaxCParameters));
+
LocationSignature::Builder locations(zone, msig->return_count(),
msig->parameter_count());
// Check the types of the signature.
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 1bde4c6a4c..19bb76b125 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -639,7 +639,7 @@ Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
-REPEAT_1_TO_8(INSTANTIATE, Node*)
+REPEAT_1_TO_11(INSTANTIATE, Node*)
#undef INSTANTIATE
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
@@ -709,6 +709,13 @@ Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
return raw_assembler()->CallN(desc, input_count, inputs);
}
+Node* CodeAssembler::CallCFunction1(MachineType return_type,
+ MachineType arg0_type, Node* function,
+ Node* arg0) {
+ return raw_assembler()->CallCFunction1(return_type, arg0_type, function,
+ arg0);
+}
+
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
@@ -726,6 +733,28 @@ Node* CodeAssembler::CallCFunction3(MachineType return_type,
arg2_type, function, arg0, arg1, arg2);
}
+Node* CodeAssembler::CallCFunction6(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ return raw_assembler()->CallCFunction6(
+ return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ arg5_type, function, arg0, arg1, arg2, arg3, arg4, arg5);
+}
+
+Node* CodeAssembler::CallCFunction9(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
+ return raw_assembler()->CallCFunction9(
+ return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ arg5_type, arg6_type, arg7_type, arg8_type, function, arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8);
+}
+
void CodeAssembler::Goto(Label* label) {
label->MergeVariables();
raw_assembler()->Goto(label->label_);
@@ -967,7 +996,13 @@ void CodeAssemblerLabel::MergeVariables() {
#if DEBUG
void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
- DCHECK(!bound_);
+ if (bound_) {
+ std::stringstream str;
+ str << "Cannot bind the same label twice:"
+ << "\n# current: " << debug_info
+ << "\n# previous: " << *label_->block();
+ FATAL(str.str().c_str());
+ }
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 86275ee0a0..1f2e4d8f4f 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -407,6 +407,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
Node* const* inputs);
+ // Call to a C function with one argument.
+ Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
+ Node* function, Node* arg0);
+
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@@ -417,6 +421,24 @@ class V8_EXPORT_PRIVATE CodeAssembler {
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
+ // Call to a C function with six arguments.
+ Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5);
+
+ // Call to a C function with nine arguments.
+ Node* CallCFunction9(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, MachineType arg8_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+ Node* arg7, Node* arg8);
+
// Exception handling support.
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
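
The new CallCFunction1 declaration mirrors the existing two- and three-argument helpers. A minimal sketch, assuming a CodeAssembler in scope and a function node that is already an ExternalConstant for a size_t f(void*)-style C function:

Node* CallUnaryCFunction(CodeAssembler* assembler, Node* function, Node* arg) {
  // Return type and argument type are passed as MachineType descriptors,
  // exactly as for the CallCFunction2/CallCFunction3 helpers.
  return assembler->CallCFunction1(MachineType::UintPtr(),  // return type
                                   MachineType::Pointer(),  // arg0 type
                                   function, arg);
}
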
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 3723a98ebe..66232aa06f 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -60,7 +60,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(code->zone(),
- info->SourcePositionRecordingMode()) {
+ info->SourcePositionRecordingMode()),
+ result_(kSuccess) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -74,8 +75,7 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
-
-Handle<Code> CodeGenerator::GenerateCode() {
+void CodeGenerator::AssembleCode() {
CompilationInfo* info = this->info();
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -99,8 +99,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
DCHECK_EQ(0u, deoptimization_literals_.size());
for (CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info->shared_info())) {
- int index = DefineDeoptimizationLiteral(inlined.shared_info);
+ if (!inlined.shared_info.equals(info->shared_info())) {
+ int index = DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(inlined.shared_info));
inlined.RegisterInlinedFunctionId(index);
}
}
@@ -110,8 +111,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
// functions. This ensures unoptimized code is kept alive by optimized code.
for (const CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info->shared_info())) {
- DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ if (!inlined.shared_info.equals(info->shared_info())) {
+ DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(inlined.inlined_code_object_root));
}
}
@@ -173,14 +175,13 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- CodeGenResult result;
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- result = AssembleBlock(block);
+ result_ = AssembleBlock(block);
} else {
- result = AssembleBlock(block);
+ result_ = AssembleBlock(block);
}
- if (result != kSuccess) return Handle<Code>();
+ if (result_ != kSuccess) return;
unwinding_info_writer_.EndInstructionBlock(block);
}
}
@@ -226,9 +227,15 @@ Handle<Code> CodeGenerator::GenerateCode() {
unwinding_info_writer_.Finish(masm()->pc_offset());
safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
+ result_ = kSuccess;
+}
+
+Handle<Code> CodeGenerator::FinalizeCode() {
+ if (result_ != kSuccess) return Handle<Code>();
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
+ masm(), unwinding_info_writer_.eh_frame_writer(), info(),
+ Handle<Object>());
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
@@ -253,7 +260,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (info->ShouldEnsureSpaceForLazyDeopt()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -299,12 +306,9 @@ bool CodeGenerator::IsMaterializableFromRoot(
const CallDescriptor* incoming_descriptor =
linkage()->GetIncomingDescriptor();
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
- RootIndexMap map(isolate());
- int root_index = map.Lookup(*object);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
- *index_return = static_cast<Heap::RootListIndex>(root_index);
- return true;
- }
+ Heap* heap = isolate()->heap();
+ return heap->IsRootHandle(object, index_return) &&
+ heap->RootCanBeTreatedAsConstant(*index_return);
}
return false;
}
@@ -578,13 +582,11 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
static_cast<int>(deoptimization_literals_.size()), TENURED);
- {
- AllowDeferredHandleDereference copy_handles;
- for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
+ for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+ Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
+ literals->set(i, *object);
}
+ data->SetLiteralArray(*literals);
Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
data->SetInliningPositions(*inl_pos);
@@ -659,11 +661,10 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
}
}
-
-int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
int result = static_cast<int>(deoptimization_literals_.size());
for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ if (deoptimization_literals_[i] == literal) return i;
}
deoptimization_literals_.push_back(literal);
return result;
@@ -725,8 +726,8 @@ void CodeGenerator::TranslateStateValueDescriptor(
DCHECK(desc->IsOptimizedOut());
if (translation != nullptr) {
if (optimized_out_literal_id_ == -1) {
- optimized_out_literal_id_ =
- DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+ optimized_out_literal_id_ = DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(isolate()->factory()->optimized_out()));
}
translation->StoreLiteral(optimized_out_literal_id_);
}
@@ -793,7 +794,8 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
}
shared_info = info()->shared_info();
}
- int shared_info_id = DefineDeoptimizationLiteral(shared_info);
+ int shared_info_id =
+ DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
switch (descriptor->type()) {
case FrameStateType::kJavaScriptFunction:
@@ -909,22 +911,23 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
CHECK(op->IsImmediate());
InstructionOperandConverter converter(this, instr);
Constant constant = converter.ToConstant(op);
- Handle<Object> constant_object;
+ DeoptimizationLiteral literal;
switch (constant.type()) {
case Constant::kInt32:
if (type.representation() == MachineRepresentation::kTagged) {
// When pointers are 4 bytes, we can use int32 constants to represent
// Smis.
DCHECK_EQ(4, kPointerSize);
- constant_object =
- handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
- DCHECK(constant_object->IsSmi());
+ Smi* smi = reinterpret_cast<Smi*>(constant.ToInt32());
+ DCHECK(smi->IsSmi());
+ literal = DeoptimizationLiteral(smi->value());
} else if (type.representation() == MachineRepresentation::kBit) {
if (constant.ToInt32() == 0) {
- constant_object = isolate()->factory()->false_value();
+ literal =
+ DeoptimizationLiteral(isolate()->factory()->false_value());
} else {
DCHECK_EQ(1, constant.ToInt32());
- constant_object = isolate()->factory()->true_value();
+ literal = DeoptimizationLiteral(isolate()->factory()->true_value());
}
} else {
// TODO(jarin,bmeurer): We currently pass in raw pointers to the
@@ -936,11 +939,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK(type.representation() != MachineRepresentation::kNone ||
constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
if (type == MachineType::Uint32()) {
- constant_object =
- isolate()->factory()->NewNumberFromUint(constant.ToInt32());
+ literal = DeoptimizationLiteral(
+ static_cast<uint32_t>(constant.ToInt32()));
} else {
- constant_object =
- isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ literal = DeoptimizationLiteral(constant.ToInt32());
}
}
break;
@@ -952,31 +954,33 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
- constant_object =
- handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
- DCHECK(constant_object->IsSmi());
+ {
+ Smi* smi = reinterpret_cast<Smi*>(constant.ToInt64());
+ DCHECK(smi->IsSmi());
+ literal = DeoptimizationLiteral(smi->value());
+ }
break;
case Constant::kFloat32:
DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
type.representation() == MachineRepresentation::kTagged);
- constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
+ literal = DeoptimizationLiteral(constant.ToFloat32());
break;
case Constant::kFloat64:
DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
type.representation() == MachineRepresentation::kTagged);
- constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
+ literal = DeoptimizationLiteral(constant.ToFloat64());
break;
case Constant::kHeapObject:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
- constant_object = constant.ToHeapObject();
+ literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
default:
UNREACHABLE();
}
- if (constant_object.is_identical_to(info()->closure())) {
+ if (literal.object().equals(info()->closure())) {
translation->StoreJSFrameFunction();
} else {
- int literal_id = DefineDeoptimizationLiteral(constant_object);
+ int literal_id = DefineDeoptimizationLiteral(literal);
translation->StoreLiteral(literal_id);
}
}
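
With GenerateCode split into AssembleCode and FinalizeCode, the caller now drives both phases and checks for failure once at the end. A hedged sketch of the expected call pattern; the surrounding helper function is illustrative, not part of the patch:

Handle<Code> GenerateAndFinalize(CodeGenerator* code_generator) {
  code_generator->AssembleCode();                       // emits all instruction blocks
  Handle<Code> code = code_generator->FinalizeCode();   // null handle on failure
  // A null handle means AssembleBlock reported an error; callers are expected
  // to abort the optimized compilation in that case.
  return code;
}
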
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index b4873ff2d8..5d879a28a5 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -48,6 +48,31 @@ class InstructionOperandIterator {
size_t pos_;
};
+// Either a non-null Handle<Object> or a double.
+class DeoptimizationLiteral {
+ public:
+ DeoptimizationLiteral() : object_(), number_(0) {}
+ explicit DeoptimizationLiteral(Handle<Object> object)
+ : object_(object), number_(0) {
+ DCHECK(!object_.is_null());
+ }
+ explicit DeoptimizationLiteral(double number) : object_(), number_(number) {}
+
+ Handle<Object> object() const { return object_; }
+
+ bool operator==(const DeoptimizationLiteral& other) const {
+ return object_.equals(other.object_) &&
+ bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_);
+ }
+
+ Handle<Object> Reify(Isolate* isolate) const {
+ return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
+ }
+
+ private:
+ Handle<Object> object_;
+ double number_;
+};
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
@@ -55,8 +80,11 @@ class CodeGenerator final : public GapResolver::Assembler {
explicit CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info);
- // Generate native code.
- Handle<Code> GenerateCode();
+ // Generate native code. After calling AssembleCode, call FinalizeCode to
+ // produce the actual code object. If an error occurs during either phase,
+ // FinalizeCode returns a null handle.
+ void AssembleCode();
+ Handle<Code> FinalizeCode();
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -208,7 +236,7 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
+ int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
size_t frame_state_offset);
DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
@@ -283,7 +311,7 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
- ZoneDeque<Handle<Object>> deoptimization_literals_;
+ ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
@@ -292,6 +320,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
+ CodeGenResult result_;
};
} // namespace compiler
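
DeoptimizationLiteral lets the code generator record number literals without allocating heap numbers up front; they are only reified when the deoptimization data is built. A small sketch of the intended usage, assuming an Isolate* isolate is in scope:

void DeoptimizationLiteralSketch(Isolate* isolate) {
  DeoptimizationLiteral number_literal(42.0);  // stored as a raw double
  DeoptimizationLiteral object_literal(isolate->factory()->true_value());
  // Reify() allocates a HeapNumber in the double case and simply returns the
  // stored handle otherwise.
  Handle<Object> number = number_literal.Reify(isolate);
  Handle<Object> object = object_literal.Reify(isolate);
}
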
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 0b98d575b1..f87c0755b8 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -307,26 +307,6 @@ int OsrValueIndexOf(Operator const* op) {
return OpParameter<int>(op);
}
-size_t hash_value(OsrGuardType type) { return static_cast<size_t>(type); }
-
-std::ostream& operator<<(std::ostream& os, OsrGuardType type) {
- switch (type) {
- case OsrGuardType::kUninitialized:
- return os << "Uninitialized";
- case OsrGuardType::kSignedSmall:
- return os << "SignedSmall";
- case OsrGuardType::kAny:
- return os << "Any";
- }
- UNREACHABLE();
- return os;
-}
-
-OsrGuardType OsrGuardTypeOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kOsrGuard, op->opcode());
- return OpParameter<OsrGuardType>(op);
-}
-
SparseInputMask SparseInputMaskOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kStateValues ||
op->opcode() == IrOpcode::kTypedStateValues);
@@ -1010,14 +990,6 @@ const Operator* CommonOperatorBuilder::OsrValue(int index) {
index); // parameter
}
-const Operator* CommonOperatorBuilder::OsrGuard(OsrGuardType type) {
- return new (zone()) Operator1<OsrGuardType>( // --
- IrOpcode::kOsrGuard, Operator::kNoThrow, // opcode
- "OsrGuard", // name
- 1, 1, 1, 1, 1, 0, // counts
- type); // parameter
-}
-
const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
return new (zone()) Operator1<int32_t>( // --
IrOpcode::kInt32Constant, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index d54bcc5311..2b51a814fe 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -287,11 +287,6 @@ Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
int OsrValueIndexOf(Operator const*);
-enum class OsrGuardType { kUninitialized, kSignedSmall, kAny };
-size_t hash_value(OsrGuardType type);
-std::ostream& operator<<(std::ostream&, OsrGuardType);
-OsrGuardType OsrGuardTypeOf(Operator const*);
-
SparseInputMask SparseInputMaskOf(Operator const*);
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
@@ -337,7 +332,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* OsrNormalEntry();
const Operator* OsrLoopEntry();
const Operator* OsrValue(int index);
- const Operator* OsrGuard(OsrGuardType type);
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 0e48932c8d..6a75e8cff2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -26,7 +26,8 @@ EffectControlLinearizer::EffectControlLinearizer(
schedule_(schedule),
temp_zone_(temp_zone),
source_positions_(source_positions),
- graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
+ graph_assembler_(js_graph, nullptr, nullptr, temp_zone),
+ frame_state_zapper_(nullptr) {}
Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -429,6 +430,7 @@ void EffectControlLinearizer::Run() {
if (block_effects.For(block->PredecessorAt(i), block)
.current_frame_state != frame_state) {
frame_state = nullptr;
+ frame_state_zapper_ = graph()->end();
break;
}
}
@@ -502,6 +504,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
if (region_observability_ == RegionObservability::kObservable &&
!node->op()->HasProperty(Operator::kNoWrite)) {
*frame_state = nullptr;
+ frame_state_zapper_ = node;
}
// Remove the end markers of 'atomic' allocation region because the
@@ -681,6 +684,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
+ if (frame_state == nullptr) {
+ V8_Fatal(__FILE__, __LINE__, "No frame state (zapped by #%d: %s)",
+ frame_state_zapper_->id(),
+ frame_state_zapper_->op()->mnemonic());
+ }
result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt32:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index a1eb03cd11..bc18ff8162 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -155,6 +155,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
GraphAssembler graph_assembler_;
+ Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 75a73ffce9..52935e0041 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -168,6 +168,9 @@ class VirtualObject : public ZoneObject {
bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
void SetField(size_t offset, Node* node, bool created_phi = false) {
+ TRACE(" VirtualObject(%p)[%zu] changes from #%i to #%i\n",
+ static_cast<void*>(this), offset,
+ fields_[offset] ? fields_[offset]->id() : -1, node ? node->id() : -1);
fields_[offset] = node;
phi_[offset] = created_phi;
}
@@ -234,6 +237,8 @@ class VirtualObject : public ZoneObject {
DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
bool VirtualObject::UpdateFrom(const VirtualObject& other) {
+ TRACE("%p.UpdateFrom(%p)\n", static_cast<void*>(this),
+ static_cast<const void*>(&other));
bool changed = status_ != other.status_;
status_ = other.status_;
phi_ = other.phi_;
@@ -425,19 +430,6 @@ bool IsEquivalentPhi(Node* node1, Node* node2) {
return true;
}
-bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
- if (phi->opcode() != IrOpcode::kPhi) return false;
- if (static_cast<size_t>(phi->op()->ValueInputCount()) != inputs.size()) {
- return false;
- }
- for (size_t i = 0; i < inputs.size(); ++i) {
- Node* input = NodeProperties::GetValueInput(phi, static_cast<int>(i));
- if (!IsEquivalentPhi(input, inputs[i])) {
- return false;
- }
- }
- return true;
-}
} // namespace
bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -446,19 +438,16 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
int value_input_count = static_cast<int>(cache->fields().size());
Node* rep = GetField(i);
if (!rep || !IsCreatedPhi(i)) {
- Type* phi_type = Type::None();
for (Node* input : cache->fields()) {
CHECK_NOT_NULL(input);
CHECK(!input->IsDead());
- Type* input_type = NodeProperties::GetType(input);
- phi_type = Type::Union(phi_type, input_type, graph->zone());
}
Node* control = NodeProperties::GetControlInput(at);
cache->fields().push_back(control);
Node* phi = graph->NewNode(
common->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache->fields().front());
- NodeProperties::SetType(phi, phi_type);
+ NodeProperties::SetType(phi, Type::Any());
SetField(i, phi, true);
#ifdef DEBUG
@@ -1269,6 +1258,11 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
DCHECK_NOT_NULL(virtual_states_[effect->id()]);
if (virtual_states_[node->id()]) {
+ TRACE("Updating virtual state %p at %s#%d from virtual state %p at %s#%d\n",
+ static_cast<void*>(virtual_states_[node->id()]),
+ node->op()->mnemonic(), node->id(),
+ static_cast<void*>(virtual_states_[effect->id()]),
+ effect->op()->mnemonic(), effect->id());
virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
zone());
} else {
@@ -1452,6 +1446,7 @@ bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
namespace {
+#ifdef DEBUG
bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
#if V8_TARGET_LITTLE_ENDIAN
return (access.offset % kPointerSize) == 0;
@@ -1461,6 +1456,7 @@ bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
kPointerSize) == 0;
#endif
}
+#endif
int OffsetForFieldAccess(Node* node) {
FieldAccess access = FieldAccessOf(node->op());
@@ -1478,48 +1474,6 @@ int OffsetForElementAccess(Node* node, int index) {
} // namespace
-void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
- VirtualState* state) {
- TRACE("Load #%d from phi #%d", load->id(), from->id());
-
- cache_->fields().clear();
- for (int i = 0; i < load->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(load, i);
- cache_->fields().push_back(input);
- }
-
- cache_->LoadVirtualObjectsForFieldsFrom(state,
- status_analysis_->GetAliasMap());
- if (cache_->objects().size() == cache_->fields().size()) {
- cache_->GetFields(offset);
- if (cache_->fields().size() == cache_->objects().size()) {
- Node* rep = replacement(load);
- if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
- int value_input_count = static_cast<int>(cache_->fields().size());
- Type* phi_type = Type::None();
- for (Node* input : cache_->fields()) {
- Type* input_type = NodeProperties::GetType(input);
- phi_type = Type::Union(phi_type, input_type, graph()->zone());
- }
- cache_->fields().push_back(NodeProperties::GetControlInput(from));
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, value_input_count),
- value_input_count + 1, &cache_->fields().front());
- NodeProperties::SetType(phi, phi_type);
- status_analysis_->ResizeStatusVector();
- SetReplacement(load, phi);
- TRACE(" got phi created.\n");
- } else {
- TRACE(" has already phi #%d.\n", rep->id());
- }
- } else {
- TRACE(" has incomplete field info.\n");
- }
- } else {
- TRACE(" has incomplete virtual object info.\n");
- }
-}
-
void EscapeAnalysis::ProcessLoadField(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
ForwardVirtualState(node);
@@ -1548,11 +1502,6 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
}
// Record that the load has this alias.
UpdateReplacement(state, node, value);
- } else if (from->opcode() == IrOpcode::kPhi &&
- IsOffsetForFieldAccessCorrect(FieldAccessOf(node->op()))) {
- int offset = OffsetForFieldAccess(node);
- // Only binary phis are supported for now.
- ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
}
@@ -1620,9 +1569,6 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
}
// Record that the load has this alias.
UpdateReplacement(state, node, value);
- } else if (from->opcode() == IrOpcode::kPhi) {
- int offset = OffsetForElementAccess(node, index.Value());
- ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
}
@@ -1670,8 +1616,8 @@ void EscapeAnalysis::ProcessStoreField(Node* node) {
FieldAccessOf(node->op()).offset == Name::kHashFieldOffset);
val = slot_not_analyzed_;
}
+ object = CopyForModificationAt(object, state, node);
if (object->GetField(offset) != val) {
- object = CopyForModificationAt(object, state, node);
object->SetField(offset, val);
}
}
@@ -1694,8 +1640,8 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
int offset = OffsetForElementAccess(node, index.Value());
if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
+ object = CopyForModificationAt(object, state, node);
if (object->GetField(offset) != val) {
- object = CopyForModificationAt(object, state, node);
object->SetField(offset, val);
}
}
@@ -1710,8 +1656,8 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
}
if (VirtualObject* object = GetVirtualObject(state, to)) {
if (!object->IsTracked()) return;
+ object = CopyForModificationAt(object, state, node);
if (!object->AllFieldsClear()) {
- object = CopyForModificationAt(object, state, node);
object->ClearAllFields();
TRACE("Cleared all fields of @%d:#%d\n",
status_analysis_->GetAlias(object->id()), object->id());
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index e5e8aa362a..a136e568da 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -53,8 +53,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
void ProcessCall(Node* node);
void ProcessStart(Node* node);
bool ProcessEffectPhi(Node* node);
- void ProcessLoadFromPhi(int offset, Node* from, Node* node,
- VirtualState* states);
void ForwardVirtualState(Node* node);
VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index a4d6829cfa..8a6c18951a 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -111,9 +111,18 @@ class Frame : public ZoneObject {
frame_slot_count_ += count;
}
- int AllocateSpillSlot(int width) {
+ int AllocateSpillSlot(int width, int alignment = 0) {
int frame_slot_count_before = frame_slot_count_;
- AllocateAlignedFrameSlots(width);
+ if (alignment <= kPointerSize) {
+ AllocateAlignedFrameSlots(width);
+ } else {
+      // Over-allocate so that the start of the spill slot can be aligned
+      // to {alignment} and there is still enough room to hold all the data.
+ AllocateAlignedFrameSlots(width + alignment - kPointerSize);
+ }
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
return frame_slot_count_ - 1;
}
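
The over-allocation for aligned spill slots is simply width + alignment - kPointerSize. For example, with 8-byte frame slots, a 16-byte slot that must be 16-byte aligned reserves 16 + 16 - 8 = 24 bytes. A sketch of the arithmetic, independent of the Frame class:

// Bytes reserved for a spill slot of {width} bytes that must start at an
// {alignment}-byte boundary, given {pointer_size}-byte frame slots.
// Mirrors the logic in Frame::AllocateSpillSlot above.
int ReservedSpillBytes(int width, int alignment, int pointer_size) {
  return alignment <= pointer_size ? width : width + alignment - pointer_size;
}
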
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 117e569ad8..cf4d9154e4 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -56,7 +56,7 @@ void GraphReducer::ReduceNode(Node* node) {
ReduceTop();
} else if (!revisit_.empty()) {
// If the stack becomes empty, revisit any nodes in the revisit queue.
- Node* const node = revisit_.top();
+ Node* const node = revisit_.front();
revisit_.pop();
if (state_.Get(node) == State::kRevisit) {
// state can change while in queue.
@@ -146,6 +146,10 @@ void GraphReducer::ReduceTop() {
// Check if the reduction is an in-place update of the {node}.
Node* const replacement = reduction.replacement();
if (replacement == node) {
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- In-place update of " << *replacement << std::endl;
+ }
// In-place update of {node}, may need to recurse on an input.
Node::Inputs node_inputs = node->inputs();
for (int i = 0; i < node_inputs.count(); ++i) {
@@ -240,8 +244,6 @@ void GraphReducer::ReplaceWithValue(Node* node, Node* value, Node* effect,
DCHECK_NOT_NULL(control);
edge.UpdateTo(control);
Revisit(user);
- // TODO(jarin) Check that the node cannot throw (otherwise, it
- // would have to be connected via IfSuccess/IfException).
}
} else if (NodeProperties::IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index b95cf9df2d..d271881872 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -174,7 +174,7 @@ class V8_EXPORT_PRIVATE GraphReducer
Node* const dead_;
NodeMarker<State> state_;
ZoneVector<Reducer*> reducers_;
- ZoneStack<Node*> revisit_;
+ ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
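
Changing revisit_ from a ZoneStack to a ZoneQueue (together with the top() to front() switch in graph-reducer.cc above) makes revisits FIFO rather than LIFO. A standalone illustration with plain STL containers, since the Zone variants order elements the same way:

#include <queue>
#include <stack>

void RevisitOrderSketch() {
  std::queue<int> fifo;  // like the new ZoneQueue<Node*> revisit_
  std::stack<int> lifo;  // like the old ZoneStack<Node*> revisit_
  for (int id : {1, 2, 3}) {
    fifo.push(id);
    lifo.push(id);
  }
  // fifo.front() == 1: nodes are now revisited in the order they were queued.
  // lifo.top() == 3: previously the most recently queued node came first.
}
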
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 9dbf19c3f5..dabdab3810 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -780,7 +780,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Label binop; \
__ bind(&binop); \
__ mov_inst(eax, i.MemoryOperand(1)); \
- __ mov_inst(i.TempRegister(0), Operand(eax)); \
+ __ Move(i.TempRegister(0), eax); \
__ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
__ lock(); \
__ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 6fd1ad5656..dccfced9e1 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -212,6 +212,14 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1864,11 +1872,7 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- inputs[input_count++] = g.UseByteRegister(value);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
+ inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
@@ -1879,7 +1883,11 @@ void InstructionSelector::VisitAtomicBinaryOperation(
}
outputs[0] = g.DefineAsFixed(node, eax);
InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+ temp[0] = g.UseByteRegister(node);
+ } else {
+ temp[0] = g.TempRegister();
+ }
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index a9b935d5b6..1d07799511 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -513,6 +513,11 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
switch (input->opcode()) {
case IrOpcode::kArgumentsElementsState: {
values->PushArgumentsElements(IsRestOf(input->op()));
+ // The elements backing store of an arguments object participates in the
+ // duplicate object counting, but can itself never appear duplicated.
+ DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
+ deduplicator->GetObjectId(input));
+ deduplicator->InsertObject(input);
return 0;
}
case IrOpcode::kArgumentsLengthState: {
@@ -921,9 +926,14 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (block->SuccessorCount() > 1) {
for (BasicBlock* const successor : block->successors()) {
for (Node* const node : *successor) {
- // If this CHECK fails, you might have specified merged variables
- // for a label with only one predecessor.
- CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
+ if (IrOpcode::IsPhiOpcode(node->opcode())) {
+ std::ostringstream str;
+ str << "You might have specified merged variables for a label with "
+ << "only one predecessor." << std::endl
+ << "# Current Block: " << *successor << std::endl
+ << "# Node: " << *node;
+ FATAL(str.str().c_str());
+ }
}
}
}
@@ -1500,10 +1510,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Neg(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
- case IrOpcode::kF32x4RecipRefine:
- return MarkAsSimd128(node), VisitF32x4RecipRefine(node);
+ case IrOpcode::kF32x4RecipSqrtApprox:
+ return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
case IrOpcode::kF32x4Add:
return MarkAsSimd128(node), VisitF32x4Add(node);
+ case IrOpcode::kF32x4AddHoriz:
+ return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
case IrOpcode::kF32x4Sub:
return MarkAsSimd128(node), VisitF32x4Sub(node);
case IrOpcode::kF32x4Mul:
@@ -1512,10 +1524,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Min(node);
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
- case IrOpcode::kF32x4RecipSqrtApprox:
- return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
- case IrOpcode::kF32x4RecipSqrtRefine:
- return MarkAsSimd128(node), VisitF32x4RecipSqrtRefine(node);
case IrOpcode::kF32x4Eq:
return MarkAsSimd1x4(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
@@ -1544,6 +1552,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ShrS(node);
case IrOpcode::kI32x4Add:
return MarkAsSimd128(node), VisitI32x4Add(node);
+ case IrOpcode::kI32x4AddHoriz:
+ return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
case IrOpcode::kI32x4Sub:
return MarkAsSimd128(node), VisitI32x4Sub(node);
case IrOpcode::kI32x4Mul:
@@ -1598,6 +1608,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8Add(node);
case IrOpcode::kI16x8AddSaturateS:
return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
+ case IrOpcode::kI16x8AddHoriz:
+ return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
case IrOpcode::kI16x8SubSaturateS:
@@ -1698,10 +1710,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
+ case IrOpcode::kS32x4Shuffle:
+ return MarkAsSimd128(node), VisitS32x4Shuffle(node);
case IrOpcode::kS32x4Select:
return MarkAsSimd128(node), VisitS32x4Select(node);
+ case IrOpcode::kS16x8Shuffle:
+ return MarkAsSimd128(node), VisitS16x8Shuffle(node);
case IrOpcode::kS16x8Select:
return MarkAsSimd128(node), VisitS16x8Select(node);
+ case IrOpcode::kS8x16Shuffle:
+ return MarkAsSimd128(node), VisitS8x16Shuffle(node);
case IrOpcode::kS8x16Select:
return MarkAsSimd128(node), VisitS8x16Select(node);
case IrOpcode::kS1x4Zero:
@@ -1887,14 +1905,6 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-void InstructionSelector::VisitStackSlot(Node* node) {
- int size = StackSlotSizeOf(node->op());
- int slot = frame_->AllocateSpillSlot(size);
- OperandGenerator g(this);
-
- Emit(kArchStackSlot, g.DefineAsRegister(node),
- sequence()->AddImmediate(Constant(slot)), 0, nullptr);
-}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
EmitIdentity(node);
@@ -2088,9 +2098,7 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
@@ -2099,12 +2107,14 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitF32x4RecipSqrtRefine(Node* node) {
- UNIMPLEMENTED();
-}
-
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2115,8 +2125,6 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4RecipRefine(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
@@ -2124,7 +2132,7 @@ void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-#endif // V8_TARGET_ARCH_ARM
+#endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2164,11 +2172,21 @@ void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2177,51 +2195,52 @@ void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
@@ -2234,7 +2253,15 @@ void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
@@ -2245,70 +2272,85 @@ void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
@@ -2316,6 +2358,10 @@ void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16LtS(Node* node) { UNIMPLEMENTED(); }
@@ -2326,7 +2372,9 @@ void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2338,11 +2386,15 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16LeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
@@ -2350,7 +2402,7 @@ void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2371,10 +2423,30 @@ void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
// !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS32x4Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS16x8Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4Or(Node* node) { UNIMPLEMENTED(); }
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index bbcd03d3ec..5cb28627de 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1109,12 +1109,12 @@ class V8_EXPORT_PRIVATE Constant final {
private:
Type type_;
- int64_t value_;
#if V8_TARGET_ARCH_32_BIT
RelocInfo::Mode rmode_ = RelocInfo::NONE32;
#else
RelocInfo::Mode rmode_ = RelocInfo::NONE64;
#endif
+ int64_t value_;
};
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index bea8f18b63..9ca0c63eb9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -114,8 +114,10 @@ MaybeHandle<Map> GetMapWitness(Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- if (NodeProperties::InferReceiverMaps(receiver, effect, &maps)) {
- if (maps.size() == 1) return MaybeHandle<Map>(maps[0]);
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
+ return maps[0];
}
return MaybeHandle<Map>();
}
@@ -734,11 +736,23 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
Node* value = NodeProperties::GetValueInput(node, 2);
+ Type* value_type = NodeProperties::GetType(value);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Constant-fold based on {value} type.
+ if (value_type->Is(Type::Array())) {
+ Node* value = jsgraph()->TrueConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ } else if (!value_type->Maybe(Type::ArrayOrProxy())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
int count = 0;
Node* values[5];
Node* effects[5];
@@ -829,11 +843,11 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // TODO(turbofan): Extend this to also handle fast (holey) double elements
+ // TODO(turbofan): Extend this to also handle fast holey double elements
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -859,15 +873,20 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Node* efalse = effect;
Node* vfalse;
{
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
// Load the elements backing store from the {receiver}.
Node* elements = efalse = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
receiver, efalse, if_false);
// Ensure that we aren't popping from a copy-on-write backing store.
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
- elements, efalse, if_false);
+ if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, efalse, if_false);
+ }
// Compute the new {length}.
length = graph()->NewNode(simplified()->NumberSubtract(), length,
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index f0febc4d26..1e1d3a92ab 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/feedback-vector-inl.h"
+#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -123,9 +124,16 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
Node* arg_array = NodeProperties::GetValueInput(node, 3);
if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
for (Edge edge : arg_array->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ Node* const user = edge.from();
+ if (user == node) continue;
+ // Ignore uses as frame state's locals or parameters.
+ if (user->opcode() == IrOpcode::kStateValues) continue;
+ // Ignore uses as frame state's accumulator.
+ if (user->opcode() == IrOpcode::kFrameState &&
+ user->InputAt(2) == arg_array) {
+ continue;
+ }
if (!NodeProperties::IsValueEdge(edge)) continue;
- if (edge.from() == node) continue;
return NoChange();
}
// Check if the arguments can be handled in the fast case (i.e. we don't
@@ -165,7 +173,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
node->RemoveInput(0); // Function.prototype.apply
node->RemoveInput(2); // arguments
NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
- start_index, p.tail_call_mode()));
+ 2, start_index, p.tail_call_mode()));
return Changed(node);
}
// Get to the actual frame state from which to extract the arguments;
@@ -272,94 +280,41 @@ Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
return Changed(node);
}
-namespace {
-
-bool CanInlineApiCall(Isolate* isolate, Node* node,
- Handle<FunctionTemplateInfo> function_template_info) {
- DCHECK(node->opcode() == IrOpcode::kJSCall);
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- if (function_template_info->call_code()->IsUndefined(isolate)) {
- return false;
- }
- CallParameters const& params = CallParametersOf(node->op());
- // CallApiCallbackStub expects the target in a register, so we count it out,
- // and counts the receiver as an implicit argument, so we count the receiver
- // out too.
- int const argc = static_cast<int>(params.arity()) - 2;
- if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
- return false;
- }
- HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
- if (!receiver.HasValue()) {
- return false;
- }
- return receiver.Value()->IsUndefined(isolate) ||
- (receiver.Value()->map()->IsJSObjectMap() &&
- !receiver.Value()->map()->is_access_check_needed());
-}
-
-} // namespace
-
-JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
- Handle<JSObject> object,
- Handle<FunctionTemplateInfo> function_template_info,
- Handle<JSObject>* holder) {
- DCHECK(object->map()->IsJSObjectMap());
- Handle<Map> object_map(object->map());
- Handle<FunctionTemplateInfo> expected_receiver_type;
- if (!function_template_info->signature()->IsUndefined(isolate())) {
- expected_receiver_type =
- handle(FunctionTemplateInfo::cast(function_template_info->signature()));
- }
- if (expected_receiver_type.is_null() ||
- expected_receiver_type->IsTemplateFor(*object_map)) {
- *holder = Handle<JSObject>::null();
- return kHolderIsReceiver;
- }
- while (object_map->has_hidden_prototype()) {
- Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- object_map = handle(prototype->map());
- if (expected_receiver_type->IsTemplateFor(*object_map)) {
- *holder = prototype;
- return kHolderFound;
- }
- }
- return kHolderNotFound;
-}
-
-// ES6 section B.2.2.1.1 get Object.prototype.__proto__
-Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 1);
+Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
Node* effect = NodeProperties::GetEffectInput(node);
- // Try to determine the {receiver} map.
- ZoneHandleSet<Map> receiver_maps;
+ // Try to determine the {object} map.
+ ZoneHandleSet<Map> object_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(object, effect, &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
Handle<Map> candidate_map(
- receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
+ object_maps[0]->GetPrototypeChainRootMap(isolate()));
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+ // We cannot deal with primitives here.
+ if (candidate_map->IsPrimitiveMap()) return NoChange();
+
// Check if we can constant-fold the {candidate_prototype}.
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- Handle<Map> const receiver_map(
- receiver_maps[i]->GetPrototypeChainRootMap(isolate()));
- if (receiver_map->IsJSProxyMap() ||
- receiver_map->has_hidden_prototype() ||
- receiver_map->is_access_check_needed() ||
- receiver_map->prototype() != *candidate_prototype) {
+ for (size_t i = 0; i < object_maps.size(); ++i) {
+ Handle<Map> const object_map(
+ object_maps[i]->GetPrototypeChainRootMap(isolate()));
+ if (object_map->IsSpecialReceiverMap() ||
+ object_map->has_hidden_prototype() ||
+ object_map->prototype() != *candidate_prototype) {
+ // We exclude special receivers, like JSProxy or API objects that
+ // might require access checks here; we also don't want to deal
+ // with hidden prototypes at this point.
return NoChange();
}
if (result == NodeProperties::kUnreliableReceiverMaps &&
- !receiver_map->is_stable()) {
+ !object_map->is_stable()) {
return NoChange();
}
}
if (result == NodeProperties::kUnreliableReceiverMaps) {
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- dependencies()->AssumeMapStable(receiver_maps[i]);
+ for (size_t i = 0; i < object_maps.size(); ++i) {
+ dependencies()->AssumeMapStable(object_maps[i]);
}
}
Node* value = jsgraph()->Constant(candidate_prototype);
@@ -370,65 +325,116 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return NoChange();
}
+// ES6 section 19.1.2.11 Object.getPrototypeOf ( O )
+Reduction JSCallReducer::ReduceObjectGetPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* object = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ return ReduceObjectGetPrototype(node, object);
+}
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ return ReduceObjectGetPrototype(node, receiver);
+}
+
+// ES6 section 26.1.7 Reflect.getPrototypeOf ( target )
+Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ return ReduceObjectGetPrototype(node, target);
+}
+
Reduction JSCallReducer::ReduceCallApiFunction(
- Node* node, Node* target,
- Handle<FunctionTemplateInfo> function_template_info) {
- Isolate* isolate = this->isolate();
- CHECK(!isolate->serializer_enabled());
- HeapObjectMatcher m(target);
- DCHECK(m.HasValue() && m.Value()->IsJSFunction());
- if (!CanInlineApiCall(isolate, node, function_template_info)) {
- return NoChange();
- }
- Handle<CallHandlerInfo> call_handler_info(
- handle(CallHandlerInfo::cast(function_template_info->call_code())));
- Handle<Object> data(call_handler_info->data(), isolate);
+ Node* node, Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int const argc = static_cast<int>(p.arity()) - 2;
+ Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
+ ? jsgraph()->HeapConstant(global_proxy())
+ : NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
- Node* receiver_node = NodeProperties::GetValueInput(node, 1);
- CallParameters const& params = CallParametersOf(node->op());
+ // CallApiCallbackStub expects the target in a register, so we count it out,
+ // and counts the receiver as an implicit argument, so we count the receiver
+ // out too.
+ if (argc > CallApiCallbackStub::kArgMax) return NoChange();
- Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
- bool const receiver_is_undefined = receiver->IsUndefined(isolate);
- if (receiver_is_undefined) {
- receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
- } else {
- DCHECK(receiver->map()->IsJSObjectMap() &&
- !receiver->map()->is_access_check_needed());
+ // Infer the {receiver} maps, and check if we can inline the API function
+ // callback based on those.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (!receiver_map->IsJSObjectMap() ||
+ (!function_template_info->accept_any_receiver() &&
+ receiver_map->is_access_check_needed())) {
+ return NoChange();
+ }
+ // In case of unreliable {receiver} information, the {receiver_maps}
+ // must all be stable in order to consume the information.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ if (!receiver_map->is_stable()) return NoChange();
+ }
}
- Handle<JSObject> holder;
- HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
- function_template_info, &holder);
- if (lookup == kHolderNotFound) return NoChange();
- if (receiver_is_undefined) {
- receiver_node = jsgraph()->HeapConstant(receiver);
- NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+ // See if we can constant-fold the compatible receiver checks.
+ CallOptimization call_optimization(function_template_info);
+ if (!call_optimization.is_simple_api_call()) return NoChange();
+ CallOptimization::HolderLookup lookup;
+ Handle<JSObject> api_holder =
+ call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return NoChange();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ CallOptimization::HolderLookup lookupi;
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_maps[i], &lookupi);
+ if (lookup != lookupi) return NoChange();
+ if (!api_holder.is_identical_to(holder)) return NoChange();
}
- Node* holder_node =
- lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
-
- Zone* zone = graph()->zone();
- // Same as CanInlineApiCall: exclude the target (which goes in a register) and
- // the receiver (which is implicitly counted by CallApiCallbackStub) from the
- // arguments count.
- int const argc = static_cast<int>(params.arity() - 2);
- CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+
+ // Install stability dependencies for unreliable {receiver_maps}.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ dependencies()->AssumeMapStable(receiver_maps[i]);
+ }
+ }
+
+ // CallApiCallbackStub's register arguments: code, target, call data, holder,
+ // function address.
+ // TODO(turbofan): Consider introducing a JSCallApiCallback operator for
+ // this and lower it during JSGenericLowering, and unify this with the
+ // JSNativeContextSpecialization::InlineApiCall method a bit.
+ Handle<CallHandlerInfo> call_handler_info(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+ Handle<Object> data(call_handler_info->data(), isolate());
+ CallApiCallbackStub stub(isolate(), argc, false);
CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate, zone, cid,
+ isolate(), graph()->zone(), cid,
cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ Node* holder = lookup == CallOptimization::kHolderFound
+ ? jsgraph()->HeapConstant(api_holder)
+ : receiver;
ExternalReference function_reference(
- &api_function, ExternalReference::DIRECT_API_CALL, isolate);
-
- // CallApiCallbackStub's register arguments: code, target, call data, holder,
- // function address.
- node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(zone, 2, jsgraph()->Constant(data));
- node->InsertInput(zone, 3, holder_node);
- node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+ &api_function, ExternalReference::DIRECT_API_CALL, isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 3, holder);
+ node->InsertInput(graph()->zone(), 4,
+ jsgraph()->ExternalConstant(function_reference));
+ node->ReplaceInput(5, receiver);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
@@ -448,62 +454,96 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
// of spread (except for value uses in frame states).
if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
for (Edge edge : spread->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ Node* const user = edge.from();
+ if (user == node) continue;
+ // Ignore uses as frame state's locals or parameters.
+ if (user->opcode() == IrOpcode::kStateValues) continue;
+ // Ignore uses as frame state's accumulator.
+ if (user->opcode() == IrOpcode::kFrameState && user->InputAt(2) == spread) {
+ continue;
+ }
if (!NodeProperties::IsValueEdge(edge)) continue;
- if (edge.from() == node) continue;
return NoChange();
}
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {spread}).
- CreateArgumentsType type = CreateArgumentsTypeOf(spread->op());
+ CreateArgumentsType const type = CreateArgumentsTypeOf(spread->op());
Node* frame_state = NodeProperties::GetFrameStateInput(spread);
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
- FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
- if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
- // Need to take the parameters from the arguments adaptor.
- frame_state = outer_state;
- }
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
int start_index = 0;
+ // Determine the formal parameter count.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ int formal_parameter_count = shared->internal_formal_parameter_count();
if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ // Mapped arguments (sloppy mode) that are aliased can only be handled
+ // here if there's no side-effect between the {node} and the {arg_array}.
+ // TODO(turbofan): Further relax this constraint.
+ if (formal_parameter_count != 0) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect != spread) {
+ if (effect->op()->EffectInputCount() != 1 ||
+ !(effect->op()->properties() & Operator::kNoWrite)) {
+ return NoChange();
+ }
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ }
} else if (type == CreateArgumentsType::kRestParameter) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- start_index = shared->internal_formal_parameter_count();
+ start_index = formal_parameter_count;
// Only check the array iterator protector when we have a rest object.
if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
- // Add a code dependency on the array iterator protector.
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
}
+ // Install appropriate code dependencies.
dependencies()->AssumeMapStable(
isolate()->initial_array_iterator_prototype_map());
-
+ if (type == CreateArgumentsType::kRestParameter) {
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ }
+ // Remove the spread input from the {node}.
node->RemoveInput(arity--);
-
+ // Check if we are spreading to inlined arguments or to the arguments of
+ // the outermost function.
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Operator const* op =
+ (node->opcode() == IrOpcode::kJSCallWithSpread)
+ ? javascript()->CallForwardVarargs(arity + 1, start_index,
+ TailCallMode::kDisallow)
+ : javascript()->ConstructForwardVarargs(arity + 2, start_index);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+ }
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this in case the {node} was already inlined into
+ // some other function (and same for the {arg_array}).
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
// Add the actual parameters to the {node}, skipping the receiver.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+ for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
node->InsertInput(graph()->zone(), static_cast<int>(++arity),
parameters->InputAt(i));
}
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
if (node->opcode() == IrOpcode::kJSCallWithSpread) {
- NodeProperties::ChangeOp(
- node, javascript()->Call(arity + 1, 7, VectorSlotPair()));
+ NodeProperties::ChangeOp(node, javascript()->Call(arity + 1));
+ Reduction const r = ReduceJSCall(node);
+ return r.Changed() ? r : Changed(node);
} else {
- NodeProperties::ChangeOp(
- node, javascript()->Construct(arity + 2, 7, VectorSlotPair()));
+ NodeProperties::ChangeOp(node, javascript()->Construct(arity + 2));
+ Reduction const r = ReduceJSConstruct(node);
+ return r.Changed() ? r : Changed(node);
}
- return Changed(node);
}
namespace {
@@ -570,8 +610,12 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeHasInstance(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kObjectGetPrototypeOf:
+ return ReduceObjectGetPrototypeOf(node);
case Builtins::kObjectPrototypeGetProto:
return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kReflectGetPrototypeOf:
+ return ReduceReflectGetPrototypeOf(node);
default:
break;
}
@@ -581,10 +625,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceArrayConstructor(node);
}
- if (shared->IsApiFunction()) {
- return ReduceCallApiFunction(
- node, target,
- handle(FunctionTemplateInfo::cast(shared->function_data())));
+ if (!FLAG_runtime_stats && shared->IsApiFunction()) {
+ Handle<FunctionTemplateInfo> function_template_info(
+ FunctionTemplateInfo::cast(shared->function_data()), isolate());
+ return ReduceCallApiFunction(node, function_template_info);
}
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
@@ -835,6 +879,11 @@ Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+Handle<JSGlobalProxy> JSCallReducer::global_proxy() const {
+ return handle(JSGlobalProxy::cast(native_context()->global_proxy()),
+ isolate());
+}
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
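For reference, the arity bookkeeping in ReduceCallApiFunction above reads as follows; the concrete call and the helper name are an illustrative sketch, not code from the tree:

  // A call like o.f(a, b) reaches the reducer with p.arity() == 4
  // (target, receiver, a, b). CallApiCallbackStub takes the target in a
  // register and treats the receiver as an implicit argument, hence:
  int ApiCallArgumentCount(CallParameters const& p) {
    return static_cast<int>(p.arity()) - 2;  // == 2 for o.f(a, b)
  }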
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 29ca61c100..31326084cc 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -41,30 +41,27 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(
- Node* node, Node* target,
- Handle<FunctionTemplateInfo> function_template_info);
+ Node* node, Handle<FunctionTemplateInfo> function_template_info);
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
Reduction ReduceFunctionPrototypeHasInstance(Node* node);
+ Reduction ReduceObjectGetPrototype(Node* node, Node* object);
+ Reduction ReduceObjectGetPrototypeOf(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
+ Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceSpreadCall(Node* node, int arity);
Reduction ReduceJSConstruct(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
- enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
-
- HolderLookup LookupHolder(Handle<JSObject> object,
- Handle<FunctionTemplateInfo> function_template_info,
- Handle<JSObject>* holder);
-
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
+ Handle<JSGlobalProxy> global_proxy() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 0deb7cb38b..c9548ffd1c 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -83,6 +83,45 @@ Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
return Changed(node);
}
+namespace {
+
+bool IsContextParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ return index == start->op()->ValueOutputCount() - 2;
+}
+
+// Given a context {node} and the {distance} from that context to the target
+// context (which we want to read from or store to), try to return a
+// specialization context. If successful, update {distance} to whatever
+// distance remains from the specialization context.
+MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
+ Maybe<OuterContext> maybe_outer) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kParameter: {
+ OuterContext outer;
+ if (maybe_outer.To(&outer) && IsContextParameter(node) &&
+ *distance >= outer.distance) {
+ *distance -= outer.distance;
+ return outer.context;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<Context>();
+}
+
+} // anonymous namespace
+
Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
@@ -90,14 +129,13 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
size_t depth = access.depth();
// First walk up the context chain in the graph as far as possible.
- Node* outer = NodeProperties::GetOuterContext(node, &depth);
+ Node* context = NodeProperties::GetOuterContext(node, &depth);
Handle<Context> concrete;
- if (!NodeProperties::GetSpecializationContext(outer, context())
- .ToHandle(&concrete)) {
+ if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
- return SimplifyJSLoadContext(node, outer, depth);
+ return SimplifyJSLoadContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
@@ -139,14 +177,13 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// First walk up the context chain in the graph until we reduce the depth to 0
// or hit a node that does not have a CreateXYZContext operator.
- Node* outer = NodeProperties::GetOuterContext(node, &depth);
+ Node* context = NodeProperties::GetOuterContext(node, &depth);
Handle<Context> concrete;
- if (!NodeProperties::GetSpecializationContext(outer, context())
- .ToHandle(&concrete)) {
+ if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
// We do not have a concrete context object, so we can only partially reduce
// the store by folding-in the outer context node.
- return SimplifyJSStoreContext(node, outer, depth);
+ return SimplifyJSStoreContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
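The depth bookkeeping in GetSpecializationContext above is easiest to see with a small worked example (the numbers are illustrative only):

  // A JSLoadContext that needs to go up 5 contexts first walks graph-side
  // context creations via GetOuterContext. If it bottoms out at the context
  // Parameter and the reducer was configured with OuterContext{C, 2}, i.e.
  // C is known to sit two hops above the incoming function context, then
  // 5 >= 2 holds, GetSpecializationContext returns C, and *distance becomes
  // 5 - 2 = 3. The remaining 3 hops are then taken on the concrete Context
  // chain.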
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index a38aca80bb..0cf2bc1e54 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -15,17 +15,29 @@ namespace compiler {
class JSGraph;
class JSOperatorBuilder;
+// Pair of a context and its distance from some point of reference.
+struct OuterContext {
+ OuterContext() : context(), distance() {}
+ OuterContext(Handle<Context> context_, size_t distance_)
+ : context(context_), distance(distance_) {}
+ Handle<Context> context;
+ size_t distance;
+};
// Specializes a given JSGraph to a given context, potentially constant folding
// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
+// Additionally, constant-folds the function parameter if {closure} is given.
+//
+// The context can be the incoming function context or any outer context
+// thereof, as indicated by {outer}'s {distance}.
class JSContextSpecialization final : public AdvancedReducer {
public:
JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
- MaybeHandle<Context> context,
+ Maybe<OuterContext> outer,
MaybeHandle<JSFunction> closure)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- context_(context),
+ outer_(outer),
closure_(closure) {}
Reduction Reduce(Node* node) final;
@@ -43,11 +55,11 @@ class JSContextSpecialization final : public AdvancedReducer {
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
- MaybeHandle<Context> context() const { return context_; }
+ Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
JSGraph* const jsgraph_;
- MaybeHandle<Context> context_;
+ Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 432b5c620b..57eedfada2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -228,6 +228,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateCatchContext(node);
case IrOpcode::kJSCreateBlockContext:
return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCreateGeneratorObject:
+ return ReduceJSCreateGeneratorObject(node);
default:
break;
}
@@ -548,6 +550,71 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return NoChange();
}
+Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateGeneratorObject, node->opcode());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+ Node* const receiver = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Type* const closure_type = NodeProperties::GetType(closure);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ // Extract constructor and original constructor function.
+ if (closure_type->IsHeapConstant()) {
+ DCHECK(closure_type->AsHeapConstant()->Value()->IsJSFunction());
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(closure_type->AsHeapConstant()->Value());
+ JSFunction::EnsureHasInitialMap(js_function);
+ Handle<Map> initial_map(js_function->initial_map());
+ initial_map->CompleteInobjectSlackTracking();
+ DCHECK(initial_map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {original_constructor}
+ // changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ DCHECK(js_function->shared()->HasBytecodeArray());
+ int size = js_function->shared()->bytecode_array()->register_count();
+ Node* elements = effect = AllocateElements(
+ effect, control, FAST_HOLEY_ELEMENTS, size, NOT_TENURED);
+
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(initial_map->instance_size());
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ Node* undefined = jsgraph()->UndefinedConstant();
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context);
+ a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure);
+ a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver);
+ a.Store(AccessBuilder::ForJSGeneratorObjectInputOrDebugPos(), undefined);
+ a.Store(AccessBuilder::ForJSGeneratorObjectResumeMode(),
+ jsgraph()->Constant(JSGeneratorObject::kNext));
+ a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
+ jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+ a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), elements);
+
+ if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos(),
+ undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
+ undefined);
+ }
+
+ // Handle in-object properties, too.
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ undefined);
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
int capacity,
Handle<AllocationSite> site) {
@@ -594,10 +661,73 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
return Changed(node);
}
+Reduction JSCreateLowering::ReduceNewArray(Node* node,
+ std::vector<Node*> values,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+
+ // Check {values} based on the {elements_kind}. These checks are guarded
+ // by the {elements_kind} feedback on the {site}, so it's safe to just
+ // deoptimize in this case.
+ if (IsFastSmiElementsKind(elements_kind)) {
+ for (auto& value : values) {
+ if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ }
+ }
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ for (auto& value : values) {
+ if (!NodeProperties::GetType(value)->Is(Type::Number())) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+ }
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
+
+ // Retrieve the initial map for the array.
+ int const array_map_index = Context::ArrayMapIndex(elements_kind);
+ Node* js_array_map = jsgraph()->HeapConstant(
+ handle(Map::cast(native_context()->get(array_map_index)), isolate()));
+
+ // Setup elements, properties and length.
+ Node* elements = effect =
+ AllocateElements(effect, control, elements_kind, values, pretenure);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ Node* length = jsgraph()->Constant(static_cast<int>(values.size()));
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* node, Handle<AllocationSite> site) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+ Type* new_target_type = NodeProperties::GetType(new_target);
ElementsKind elements_kind = site->GetElementsKind();
AllocationSiteOverrideMode override_mode =
@@ -605,12 +735,19 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
+ // The Array constructor can only trigger an observable side-effect
+ // if the new.target may be a proxy.
+ Operator::Properties const properties =
+ (new_target != target || new_target_type->Maybe(Type::Proxy()))
+ ? Operator::kNoDeopt
+ : Operator::kNoDeopt | Operator::kNoWrite;
+
if (arity == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
@@ -628,7 +765,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
@@ -655,7 +792,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
@@ -678,7 +815,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
isolate(), GetHoleyElementsKind(elements_kind), override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
@@ -745,12 +882,25 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else if (p.arity() == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type* length_type = NodeProperties::GetType(length);
+ if (!length_type->Maybe(Type::Number())) {
+ // Handle the single argument case, where we know that the value
+ // cannot be a valid Array length.
+ return ReduceNewArray(node, {length}, site);
+ }
if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
length_type->Max() <= kElementLoopUnrollLimit &&
length_type->Min() == length_type->Max()) {
int capacity = static_cast<int>(length_type->Max());
return ReduceNewArray(node, length, capacity, site);
}
+ } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
+ std::vector<Node*> values;
+ values.reserve(p.arity());
+ for (size_t i = 0; i < p.arity(); ++i) {
+ values.push_back(
+ NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
+ }
+ return ReduceNewArray(node, values, site);
}
}
@@ -1115,6 +1265,31 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
return a.Finish();
}
+Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ std::vector<Node*> const& values,
+ PretenureFlag pretenure) {
+ int const capacity = static_cast<int>(values.size());
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, values[i]);
+ }
+ return a.Finish();
+}
+
Node* JSCreateLowering::AllocateFastLiteral(
Node* effect, Node* control, Handle<JSObject> boilerplate,
AllocationSiteUsageContext* site_context) {
@@ -1206,7 +1381,7 @@ Node* JSCreateLowering::AllocateFastLiteral(
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(boilerplate_map->instance_size(), pretenure,
- Type::OtherObject());
+ Type::For(boilerplate_map));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
builder.Store(AccessBuilder::ForJSObjectElements(), elements);
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index eea75d3842..d03464d39d 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -57,8 +57,11 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateWithContext(Node* node);
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCreateGeneratorObject(Node* node);
Reduction ReduceNewArray(Node* node, Node* length, int capacity,
Handle<AllocationSite> site);
+ Reduction ReduceNewArray(Node* node, std::vector<Node*> values,
+ Handle<AllocationSite> site);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
@@ -69,6 +72,10 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
PretenureFlag pretenure);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ std::vector<Node*> const& values,
+ PretenureFlag pretenure);
Node* AllocateFastLiteral(Node* effect, Node* control,
Handle<JSObject> boilerplate,
AllocationSiteUsageContext* site_context);
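To illustrate the new value-based array lowering declared above (an informal sketch of its effect under a smi-elements feedback site, not code from the tree):

  // For `new Array(a, b, c)` with at most kInitialMaxFastElementArray
  // arguments and an AllocationSite that recorded FAST_SMI_ELEMENTS,
  // ReduceNewArray(node, {a, b, c}, site):
  //   - wraps each value that is not statically SignedSmall in CheckSmi,
  //   - allocates a 3-element backing store via the new AllocateElements
  //     overload, storing a, b and c at indices 0..2, and
  //   - builds the JSArray header (map, properties, elements, length == 3)
  //     with AllocationBuilder, replacing the stub call entirely.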
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 73e1b7dd24..d4f6822de6 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -16,8 +16,6 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kOsrValue:
return ReduceOsrValue(node);
- case IrOpcode::kOsrGuard:
- return ReduceOsrGuard(node);
case IrOpcode::kParameter:
return ReduceParameter(node);
default:
@@ -47,13 +45,6 @@ Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
return Replace(jsgraph()->Constant(value));
}
-Reduction JSFrameSpecialization::ReduceOsrGuard(Node* node) {
- DCHECK_EQ(IrOpcode::kOsrGuard, node->opcode());
- ReplaceWithValue(node, node->InputAt(0),
- NodeProperties::GetEffectInput(node));
- return Changed(node);
-}
-
Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
Handle<Object> value;
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index daf699265c..f268b3ac5b 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -29,7 +29,6 @@ class JSFrameSpecialization final : public AdvancedReducer {
private:
Reduction ReduceOsrValue(Node* node);
- Reduction ReduceOsrGuard(Node* node);
Reduction ReduceParameter(Node* node);
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 2b333c06c5..ea5a4a4627 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -309,10 +309,10 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- LanguageMode language_mode = OpParameter<LanguageMode>(node);
- ReplaceWithRuntimeCall(node, is_strict(language_mode)
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kDeleteProperty);
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
@@ -423,6 +423,13 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
}
}
+void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCreateGeneratorObject);
+ node->RemoveInput(4); // control
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
@@ -464,8 +471,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable =
- CodeFactory::FastCloneShallowObject(isolate(), p.length());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
@@ -516,6 +522,28 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
}
+void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
+ ConstructForwardVarargsParameters p =
+ ConstructForwardVarargsParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* start_index = jsgraph()->Uint32Constant(p.start_index());
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, start_index);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSConstruct(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
@@ -556,17 +584,20 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
- Callable callable = CodeFactory::CallForwardVarargs(isolate());
+ int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::CallForwardVarargs(isolate());
if (p.tail_call_mode() == TailCallMode::kAllow) {
flags |= CallDescriptor::kSupportsTailCalls;
}
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, start_index);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, start_index);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index b51623aca2..93706acf5a 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -280,6 +280,14 @@ Node* JSGraph::EmptyStateValues() {
0, SparseInputMask::Dense())));
}
+Node* JSGraph::SingleDeadTypedStateValues() {
+ return CACHED(kSingleDeadTypedStateValues,
+ graph()->NewNode(common()->TypedStateValues(
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(0, graph()->zone()),
+ SparseInputMask(SparseInputMask::kEndMarker << 1))));
+}
+
Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 8f81555cb2..4b3ed4856a 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -142,6 +142,10 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// values for a certain part of the frame state.
Node* EmptyStateValues();
+ // Typed state values with a single dead input. This is useful to represent
+ // a dead accumulator.
+ Node* SingleDeadTypedStateValues();
+
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
@@ -181,6 +185,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kOneConstant,
kNaNConstant,
kEmptyStateValues,
+ kSingleDeadTypedStateValues,
kDead,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 9774de28e5..4335e96c61 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -65,6 +65,15 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
return true;
}
+bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) {
+ // Don't forcibly inline functions that weren't compiled yet.
+ if (shared->ast_node_count() == 0) return false;
+
+ // Forcibly inline small functions.
+ if (shared->ast_node_count() <= FLAG_max_inlined_nodes_small) return true;
+ return false;
+}
+
} // namespace
Reduction JSInliningHeuristic::Reduce(Node* node) {
@@ -91,7 +100,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Functions marked with %SetForceInlineFlag are immediately inlined.
- bool can_inline = false, force_inline = true;
+ bool can_inline = false, force_inline = true, small_inline = true;
for (int i = 0; i < candidate.num_functions; ++i) {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
@@ -100,11 +109,15 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
if (!shared->force_inline()) {
force_inline = false;
}
- if (CanInlineFunction(shared)) {
+ candidate.can_inline_function[i] = CanInlineFunction(shared);
+ if (candidate.can_inline_function[i]) {
can_inline = true;
}
+ if (!IsSmallInlineFunction(shared)) {
+ small_inline = false;
+ }
}
- if (force_inline) return InlineCandidate(candidate);
+ if (force_inline) return InlineCandidate(candidate, true);
if (!can_inline) return NoChange();
// Stop inlining once the maximum allowed level is reached.
@@ -141,11 +154,27 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
case kRestrictedInlining:
return NoChange();
case kStressInlining:
- return InlineCandidate(candidate);
+ return InlineCandidate(candidate, false);
case kGeneralInlining:
break;
}
+ // Don't consider a {candidate} whose frequency is below the
+ // threshold, i.e. a call site that is only hit once every N
+ // invocations of the caller.
+ if (candidate.frequency.IsKnown() &&
+ candidate.frequency.value() < FLAG_min_inlining_frequency) {
+ return NoChange();
+ }
+
+ // Forcibly inline small functions here. In the case of polymorphic inlining,
+ // small_inline is set only when all functions are small.
+ if (small_inline && cumulative_count_ <= FLAG_max_inlined_nodes_absolute) {
+ TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
+ node->op()->mnemonic());
+ return InlineCandidate(candidate, true);
+ }
+
// In the general case we remember the candidate for later.
candidates_.insert(candidate);
return NoChange();
@@ -164,19 +193,16 @@ void JSInliningHeuristic::Finalize() {
auto i = candidates_.begin();
Candidate candidate = *i;
candidates_.erase(i);
- // Only include candidates that we've successfully called before.
- // The candidate list is sorted, so we can exit at the first occurance of
- // frequency 0 in the list.
- if (candidate.frequency <= 0.0) return;
// Make sure we don't try to inline dead candidate nodes.
if (!candidate.node->IsDead()) {
- Reduction const reduction = InlineCandidate(candidate);
+ Reduction const reduction = InlineCandidate(candidate, false);
if (reduction.Changed()) return;
}
}
}
-Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
+Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
+ bool force_inline) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
if (num_calls == 1) {
@@ -268,12 +294,16 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
for (int i = 0; i < num_calls; ++i) {
Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i];
- Reduction const reduction = inliner_.ReduceJSCall(node);
- if (reduction.Changed()) {
- // Killing the call node is not strictly necessary, but it is safer to
- // make sure we do not resurrect the node.
- node->Kill();
- cumulative_count_ += function->shared()->ast_node_count();
+ if (force_inline ||
+ (candidate.can_inline_function[i] &&
+ cumulative_count_ < FLAG_max_inlined_nodes_cumulative)) {
+ Reduction const reduction = inliner_.ReduceJSCall(node);
+ if (reduction.Changed()) {
+ // Killing the call node is not strictly necessary, but it is safer to
+ // make sure we do not resurrect the node.
+ node->Kill();
+ cumulative_count_ += function->shared()->ast_node_count();
+ }
}
}
@@ -282,9 +312,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
bool JSInliningHeuristic::CandidateCompare::operator()(
const Candidate& left, const Candidate& right) const {
- if (left.frequency > right.frequency) {
+ if (right.frequency.IsUnknown()) {
+ if (left.frequency.IsUnknown()) {
+ // If left and right are both unknown then the ordering is indeterminate,
+ // which breaks strict weak ordering requirements, so we fall back to the
+ // node id as a tie breaker.
+ return left.node->id() > right.node->id();
+ }
+ return true;
+ } else if (left.frequency.IsUnknown()) {
+ return false;
+ } else if (left.frequency.value() > right.frequency.value()) {
return true;
- } else if (left.frequency < right.frequency) {
+ } else if (left.frequency.value() < right.frequency.value()) {
return false;
} else {
return left.node->id() > right.node->id();
@@ -292,10 +332,12 @@ bool JSInliningHeuristic::CandidateCompare::operator()(
}
void JSInliningHeuristic::PrintCandidates() {
- PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
+ OFStream os(stdout);
+ os << "Candidates for inlining (size=" << candidates_.size() << "):\n";
for (const Candidate& candidate : candidates_) {
- PrintF(" #%d:%s, frequency:%g\n", candidate.node->id(),
- candidate.node->op()->mnemonic(), candidate.frequency);
+ os << " #" << candidate.node->id() << ":"
+ << candidate.node->op()->mnemonic()
+ << ", frequency: " << candidate.frequency << std::endl;
for (int i = 0; i < candidate.num_functions; ++i) {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index b834cb0a06..0f5f9f87c1 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -37,14 +37,17 @@ class JSInliningHeuristic final : public AdvancedReducer {
struct Candidate {
Handle<JSFunction> functions[kMaxCallPolymorphism];
+ // In the case of polymorphic inlining, this tells if each of the
+ // functions could be inlined.
+ bool can_inline_function[kMaxCallPolymorphism];
// TODO(2206): For now polymorphic inlining is treated orthogonally to
// inlining based on SharedFunctionInfo. This should be unified and the
// above array should be switched to SharedFunctionInfo instead. Currently
// we use {num_functions == 1 && functions[0].is_null()} as an indicator.
Handle<SharedFunctionInfo> shared_info;
int num_functions;
- Node* node = nullptr; // The call site at which to inline.
- float frequency = 0.0f; // Relative frequency of this call site.
+ Node* node = nullptr; // The call site at which to inline.
+ CallFrequency frequency; // Relative frequency of this call site.
};
// Comparator for candidates.
@@ -57,7 +60,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
// Dumps candidates to console.
void PrintCandidates();
- Reduction InlineCandidate(Candidate const& candidate);
+ Reduction InlineCandidate(Candidate const& candidate, bool force_inline);
CommonOperatorBuilder* common() const;
Graph* graph() const;
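The rewritten CandidateCompare above must remain a strict weak ordering for std::set, which is why unknown frequencies get an explicit node-id tie breaker. A compact sketch of the same ordering over a hypothetical stand-in type (not V8 code), just to make the cases explicit:

  struct Cand { bool known; float freq; int id; };

  // Returns true when a should come before b: known frequencies first
  // (higher ones earlier), unknown ones last, node id as the tie breaker.
  bool CandLess(Cand const& a, Cand const& b) {
    if (!b.known) return a.known ? true : a.id > b.id;
    if (!a.known) return false;
    if (a.freq != b.freq) return a.freq > b.freq;
    return a.id > b.id;
  }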
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index af24b703d3..9b260e3533 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -66,7 +66,7 @@ class JSCallAccessor {
return call_->op()->ValueInputCount() - 2;
}
- float frequency() const {
+ CallFrequency frequency() const {
return (call_->opcode() == IrOpcode::kJSCall)
? CallParametersOf(call_->op()).frequency()
: ConstructParametersOf(call_->op()).frequency();
@@ -335,10 +335,11 @@ bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
Isolate* const isolate = shared_info->GetIsolate();
Code* const construct_stub = shared_info->construct_stub();
- return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
- construct_stub !=
- *isolate->builtins()->JSBuiltinsConstructStubForDerived() &&
- construct_stub != *isolate->builtins()->JSConstructStubApi();
+ if (construct_stub == *isolate->builtins()->JSConstructStubGeneric()) {
+ return !IsDerivedConstructor(shared_info->kind());
+ } else {
+ return false;
+ }
}
bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
@@ -486,18 +487,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- // TODO(706642): Don't inline derived class constructors for now, as the
- // inlining logic doesn't deal properly with derived class constructors
- // that return a primitive, i.e. it's not in sync with what the Parser
- // and the JSConstructSub does.
- if (node->opcode() == IrOpcode::kJSConstruct &&
- IsDerivedConstructor(shared_info->kind())) {
- TRACE("Not inlining %s into %s because constructor is derived.\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (node->opcode() == IrOpcode::kJSCall &&
@@ -655,21 +644,93 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
uncaught_subcalls.push_back(create); // Adds {IfSuccess} & {IfException}.
NodeProperties::ReplaceControlInput(node, create);
NodeProperties::ReplaceEffectInput(node, create);
- // Insert a check of the return value to determine whether the return
- // value or the implicit receiver should be selected as a result of the
- // call.
- Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- check, node, create);
- NodeProperties::ReplaceUses(node, select, node, node, node);
- // Fix-up inputs that have been mangled by the {ReplaceUses} call above.
- NodeProperties::ReplaceValueInput(select, node, 1); // Fix-up input.
- NodeProperties::ReplaceValueInput(check, node, 0); // Fix-up input.
+ Node* node_success =
+ NodeProperties::FindSuccessfulControlProjection(node);
+ // Placeholder to hold {node}'s value dependencies while {node} is
+ // replaced.
+ Node* dummy = graph()->NewNode(common()->Dead());
+ NodeProperties::ReplaceUses(node, dummy, node, node, node);
+ Node* result;
+ if (FLAG_harmony_restrict_constructor_return &&
+ IsClassConstructor(shared_info->kind())) {
+ Node* is_undefined =
+ graph()->NewNode(simplified()->ReferenceEqual(), node,
+ jsgraph()->UndefinedConstant());
+ Node* branch_is_undefined =
+ graph()->NewNode(common()->Branch(), is_undefined, node_success);
+ Node* branch_is_undefined_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_undefined);
+ Node* branch_is_undefined_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_undefined);
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ Node* branch_is_receiver = graph()->NewNode(
+ common()->Branch(), is_receiver, branch_is_undefined_false);
+ Node* branch_is_receiver_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_receiver);
+ Node* branch_is_receiver_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_receiver);
+ branch_is_receiver_false =
+ graph()->NewNode(javascript()->CallRuntime(
+ Runtime::kThrowConstructorReturnedNonObject),
+ context, NodeProperties::GetFrameStateInput(node),
+ node, branch_is_receiver_false);
+ uncaught_subcalls.push_back(branch_is_receiver_false);
+ branch_is_receiver_false =
+ graph()->NewNode(common()->Throw(), branch_is_receiver_false,
+ branch_is_receiver_false);
+ NodeProperties::MergeControlToEnd(graph(), common(),
+ branch_is_receiver_false);
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), branch_is_undefined_true,
+ branch_is_receiver_true);
+ result =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ create, node, merge);
+ ReplaceWithValue(node_success, node_success, node_success, merge);
+ // Fix input destroyed by the above {ReplaceWithValue} call.
+ NodeProperties::ReplaceControlInput(branch_is_undefined, node_success,
+ 0);
+ } else {
+ // Insert a check of the return value to determine whether the return
+ // value or the implicit receiver should be selected as a result of the
+ // call.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ result =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ check, node, create);
+ }
receiver = create; // The implicit receiver.
+ ReplaceWithValue(dummy, result);
+ } else if (IsDerivedConstructor(shared_info->kind())) {
+ Node* node_success =
+ NodeProperties::FindSuccessfulControlProjection(node);
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ Node* branch_is_receiver =
+ graph()->NewNode(common()->Branch(), is_receiver, node_success);
+ Node* branch_is_receiver_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_receiver);
+ Node* branch_is_receiver_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_receiver);
+ branch_is_receiver_false =
+ graph()->NewNode(javascript()->CallRuntime(
+ Runtime::kThrowConstructorReturnedNonObject),
+ context, NodeProperties::GetFrameStateInput(node),
+ node, branch_is_receiver_false);
+ uncaught_subcalls.push_back(branch_is_receiver_false);
+ branch_is_receiver_false =
+ graph()->NewNode(common()->Throw(), branch_is_receiver_false,
+ branch_is_receiver_false);
+ NodeProperties::MergeControlToEnd(graph(), common(),
+ branch_is_receiver_false);
+
+ ReplaceWithValue(node_success, node_success, node_success,
+ branch_is_receiver_true);
+ // Fix input destroyed by the above {ReplaceWithValue} call.
+ NodeProperties::ReplaceControlInput(branch_is_receiver, node_success, 0);
}
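
The branches built above boil down to a three-way classification of what a class constructor returned under the flag check; a minimal sketch of that decision (the enum and helper names here are hypothetical, not part of the patch):

// Sketch only: the decision the Branch/Merge/Phi graph above encodes.
enum class ConstructResult { kUseImplicitReceiver, kUseReturnValue, kThrow };

ConstructResult ClassifyClassConstructorReturn(bool returned_undefined,
                                               bool returned_receiver) {
  if (returned_undefined) return ConstructResult::kUseImplicitReceiver;
  if (returned_receiver) return ConstructResult::kUseReturnValue;
  // Corresponds to the Runtime::kThrowConstructorReturnedNonObject path.
  return ConstructResult::kThrow;
}
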
node->ReplaceInput(1, receiver);
-
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index a18551c642..b9ee8a4ed6 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -38,6 +38,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceDeoptimizeNow(node);
case Runtime::kInlineGeneratorClose:
return ReduceGeneratorClose(node);
+ case Runtime::kInlineCreateJSGeneratorObject:
+ return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
case Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos:
@@ -56,6 +58,18 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSProxy:
return ReduceIsInstanceType(node, JS_PROXY_TYPE);
+ case Runtime::kInlineIsJSMap:
+ return ReduceIsInstanceType(node, JS_MAP_TYPE);
+ case Runtime::kInlineIsJSSet:
+ return ReduceIsInstanceType(node, JS_SET_TYPE);
+ case Runtime::kInlineIsJSMapIterator:
+ return ReduceIsInstanceType(node, JS_MAP_ITERATOR_TYPE);
+ case Runtime::kInlineIsJSSetIterator:
+ return ReduceIsInstanceType(node, JS_SET_ITERATOR_TYPE);
+ case Runtime::kInlineIsJSWeakMap:
+ return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
+ case Runtime::kInlineIsJSWeakSet:
+ return ReduceIsInstanceType(node, JS_WEAK_SET_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -86,6 +100,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
case Runtime::kInlineArrayBufferViewGetByteOffset:
return ReduceArrayBufferViewField(
node, AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Runtime::kInlineArrayBufferViewWasNeutered:
+ return ReduceArrayBufferViewWasNeutered(node);
case Runtime::kInlineMaxSmi:
return ReduceMaxSmi(node);
case Runtime::kInlineTypedArrayGetLength:
@@ -145,6 +161,19 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceCreateJSGeneratorObject(Node* node) {
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+ Node* const receiver = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op = javascript()->CreateGeneratorObject();
+ Node* create_generator =
+ graph()->NewNode(op, closure, receiver, context, effect, control);
+ ReplaceWithValue(node, create_generator, create_generator);
+ return Changed(create_generator);
+}
+
Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -336,10 +365,7 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(
- node,
- javascript()->Call(arity, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kAny, TailCallMode::kDisallow));
+ NodeProperties::ChangeOp(node, javascript()->Call(arity));
return Changed(node);
}
@@ -374,6 +400,22 @@ Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
return Replace(value);
}
+Reduction JSIntrinsicLowering::ReduceArrayBufferViewWasNeutered(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
Node* value = jsgraph()->Constant(Smi::kMaxValue);
ReplaceWithValue(node, value);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 2a2baf0930..0f3e84a5e5 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDebugIsActive(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
+ Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
@@ -68,6 +69,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
// TODO(turbofan): typedarray.js support; drop once TypedArrays are
// converted to proper CodeStubAssembler based builtins.
Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
+ Reduction ReduceArrayBufferViewWasNeutered(Node* node);
Reduction ReduceMaxSmi(Node* node);
Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 66013b85ca..5a3ccebed1 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -24,21 +24,21 @@ namespace compiler {
namespace {
-bool HasNumberMaps(MapList const& maps) {
+bool HasNumberMaps(MapHandles const& maps) {
for (auto map : maps) {
if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
}
return false;
}
-bool HasOnlyJSArrayMaps(MapList const& maps) {
+bool HasOnlyJSArrayMaps(MapHandles const& maps) {
for (auto map : maps) {
if (!map->IsJSArrayMap()) return false;
}
return true;
}
-bool HasOnlyNumberMaps(MapList const& maps) {
+bool HasOnlyNumberMaps(MapHandles const& maps) {
for (auto map : maps) {
if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
}
@@ -154,11 +154,6 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
if (function_prototype->IsConstructor()) {
ReplaceWithValue(node, value);
return Replace(value);
- } else {
- node->InsertInput(graph()->zone(), 0, value);
- NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor));
- return Changed(node);
}
}
@@ -247,9 +242,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
node->ReplaceInput(2, object);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
- node,
- javascript()->Call(3, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined));
+ node, javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
@@ -271,10 +265,17 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
Node* object = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
- // Check if the {constructor} is a JSBoundFunction.
+ // Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
- if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+ if (!m.HasValue()) return NoChange();
+
+ // Check if the {constructor} is a JSBoundFunction.
+ if (m.Value()->IsJSBoundFunction()) {
// OrdinaryHasInstance on bound functions turns into a recursive
// invocation of the instanceof operator again.
// ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
@@ -288,6 +289,160 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
return reduction.Changed() ? reduction : Changed(node);
}
+ // Check if the {constructor} is a JSFunction.
+ if (m.Value()->IsJSFunction()) {
+ // Check if the {function} is a constructor and has an instance "prototype".
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ if (function->IsConstructor() && function->has_instance_prototype() &&
+ function->prototype()->IsJSReceiver()) {
+ // Ensure that the {function} has a valid initial map, so we can
+ // depend on that for the prototype constant-folding below.
+ JSFunction::EnsureHasInitialMap(function);
+
+ // Install a code dependency on the {function}s initial map.
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Handle<JSReceiver> function_prototype =
+ handle(JSReceiver::cast(initial_map->prototype()), isolate());
+
+ // Check if we can constant-fold the prototype chain walk
+ // for the given {object} and the {function_prototype}.
+ InferHasInPrototypeChainResult result =
+ InferHasInPrototypeChain(object, effect, function_prototype);
+ if (result != kMayBeInPrototypeChain) {
+ Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ Node* prototype = jsgraph()->Constant(function_prototype);
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Loop through the {object}s prototype chain looking for the {prototype}.
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = object =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ object, object, loop);
+
+ // Load the {object} map and instance type.
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object, effect, control);
+ Node* object_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ object_map, effect, control);
+
+ // Check if the {object} is a special receiver, because for special
+ // receivers, i.e. proxies or API objects that need access checks,
+ // we have to use the %HasInPrototypeChain runtime function instead.
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberLessThanOrEqual(), object_instance_type,
+ jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch1);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+
+ // Check if the {object} is not a receiver at all.
+ Node* check10 =
+ graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
+ jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+ Node* branch10 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check10, if_true1);
+
+ // A primitive value cannot match the {prototype} we're looking for.
+ if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+ vtrue1 = jsgraph()->FalseConstant();
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+ Node* efalse1 = etrue1;
+ Node* vfalse1;
+ {
+ // Slow path, need to call the %HasInPrototypeChain runtime function.
+ vfalse1 = efalse1 = if_false1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
+ prototype, context, frame_state, efalse1, if_false1);
+
+ // Replace any potential {IfException} uses of {node} to catch
+ // exceptions from this %HasInPrototypeChain runtime call instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse1);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ Revisit(on_exception);
+ }
+ }
+
+ // Load the {object} prototype.
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
+ effect, control);
+
+ // Check if we reached the end of {object}s prototype chain.
+ Node* check2 =
+ graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
+ jsgraph()->NullConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = effect;
+ Node* vtrue2 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch2);
+
+ // Check if we reached the {prototype}.
+ Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+ object_prototype, prototype);
+ Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
+
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = effect;
+ Node* vtrue3 = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch3);
+
+ // Close the loop.
+ vloop->ReplaceInput(1, object_prototype);
+ eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(5), if_true0, if_true1,
+ if_true2, if_true3, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+ etrue3, efalse1, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vtrue1);
+ node->ReplaceInput(2, vtrue2);
+ node->ReplaceInput(3, vtrue3);
+ node->ReplaceInput(4, vfalse1);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(
+ node, common()->Phi(MachineRepresentation::kTagged, 5));
+ return Changed(node);
+ }
+ }
+
return NoChange();
}
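
For orientation, the loop constructed above mirrors the spec-level prototype-chain walk of OrdinaryHasInstance, with the %HasInPrototypeChain runtime call as the slow path for special receivers. A condensed standalone sketch, using a hypothetical HeapObject stand-in rather than V8's real object model:

// Sketch only: conceptual equivalent of the emitted loop.
struct HeapObject {
  const HeapObject* prototype;  // nullptr terminates the chain.
  bool is_receiver;             // false for Smis and other primitives.
};

bool HasInPrototypeChain(const HeapObject* object, const HeapObject* prototype) {
  if (!object->is_receiver) return false;  // Primitives never match.
  for (const HeapObject* p = object->prototype; p != nullptr; p = p->prototype) {
    if (p == prototype) return true;  // Reached the target prototype.
  }
  return false;  // Fell off the end of the chain.
}
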
@@ -551,9 +706,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
- Node* node, Node* value, MapHandleList const& receiver_maps,
- Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
- Node* index) {
+ Node* node, Node* value, MapHandles const& receiver_maps, Handle<Name> name,
+ AccessMode access_mode, LanguageMode language_mode, Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -568,8 +722,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Check if we have an access o.x or o.x=v where o is the current
// native context's global proxy, and turn that into a direct access
// to the current native context's global object instead.
- if (receiver_maps.length() == 1) {
- Handle<Map> receiver_map = receiver_maps.first();
+ if (receiver_maps.size() == 1) {
+ Handle<Map> receiver_map = receiver_maps.front();
if (receiver_map->IsJSGlobalProxyMap()) {
Object* maybe_constructor = receiver_map->GetConstructor();
// Detached global proxies have |null| as their constructor.
@@ -686,7 +840,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* this_control = fallthrough_control;
// Perform map check on {receiver}.
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
{
// Emit a (sequence of) map checks for other {receiver}s.
ZoneVector<Node*> this_controls(zone());
@@ -801,10 +955,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
}
// Extract receiver maps from the IC using the {nexus}.
- MapHandleList receiver_maps;
+ MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
- } else if (receiver_maps.length() == 0) {
+ } else if (receiver_maps.empty()) {
if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
@@ -831,11 +985,11 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
p.name().is_identical_to(factory()->prototype_string())) {
// Optimize "prototype" property of functions.
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->has_initial_map()) {
+ if (function->IsConstructor()) {
// We need to add a code dependency on the initial map of the
// {function} in order to be notified about changes to the
- // "prototype" of {function}, so it doesn't make sense to
- // continue unless deoptimization is enabled.
+ // "prototype" of {function}.
+ JSFunction::EnsureHasInitialMap(function);
Handle<Map> initial_map(function->initial_map(), isolate());
dependencies()->AssumeInitialMapCantChange(initial_map);
Handle<Object> prototype(function->prototype(), isolate());
@@ -892,7 +1046,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceElementAccess(
- Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
+ Node* node, Node* index, Node* value, MapHandles const& receiver_maps,
AccessMode access_mode, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1056,7 +1210,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
receiver, this_effect, this_control);
// Perform map check(s) on {receiver}.
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
@@ -1189,10 +1343,10 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
// Extract receiver maps from the {nexus}.
- MapHandleList receiver_maps;
+ MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
- } else if (receiver_maps.length() == 0) {
+ } else if (receiver_maps.empty()) {
if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
@@ -1347,7 +1501,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the getter function.
if (access_info.constant()->IsJSFunction()) {
value = effect = control = graph()->NewNode(
- javascript()->Call(2, 0.0f, VectorSlotPair(),
+ javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state0, effect, control);
} else {
@@ -1383,7 +1537,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the setter function.
if (access_info.constant()->IsJSFunction()) {
effect = control = graph()->NewNode(
- javascript()->Call(3, 0.0f, VectorSlotPair(),
+ javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state0, effect, control);
} else {
@@ -1756,7 +1910,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Node* buffer;
@@ -2059,7 +2213,7 @@ JSNativeContextSpecialization::InlineApiCall(
int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
CallApiCallbackStub stub(
- isolate(), argc, call_data_object->IsUndefined(isolate()),
+ isolate(), argc,
true /* FunctionTemplateInfo doesn't have an associated context. */);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
@@ -2127,7 +2281,7 @@ Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& receiver_maps) {
+ MapHandles const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
Handle<Map> receiver_map(m.Value()->map(), isolate());
@@ -2154,6 +2308,15 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
Handle<Map> map, Node* properties, Node* effect, Node* control) {
+ // TODO(bmeurer/jkummerow): Property deletions can undo map transitions
+ // while keeping the backing store around, meaning that even though the
+ // map might believe that objects have no unused property fields, there
+ // might actually be some. It would be nice to not create a new backing
+ // store in that case (i.e. when properties->length() >= new_length).
+ // However, introducing branches and Phi nodes here would make it more
+ // difficult for escape analysis to get rid of the backing stores used
+ // for intermediate states of chains of property additions. That makes
+ // it unclear what the best approach is here.
DCHECK_EQ(0, map->unused_property_fields());
// Compute the length of the old {properties} and the new properties.
int length = map->NextFreePropertyIndex() - map->GetInObjectProperties();
@@ -2192,7 +2355,7 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
}
void JSNativeContextSpecialization::AssumePrototypesStable(
- std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
+ MapHandles const& receiver_maps, Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
for (auto map : receiver_maps) {
// Perform the implicit ToObject for primitives here.
@@ -2207,7 +2370,7 @@ void JSNativeContextSpecialization::AssumePrototypesStable(
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
- std::vector<Handle<Map>> const& receiver_maps) {
+ MapHandles const& receiver_maps) {
// Check if the array prototype chain is intact.
if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
@@ -2241,10 +2404,61 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return true;
}
+JSNativeContextSpecialization::InferHasInPrototypeChainResult
+JSNativeContextSpecialization::InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<JSReceiver> prototype) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+
+ // Check if either all or none of the {receiver_maps} have the given
+ // {prototype} in their prototype chain.
+ bool all = true;
+ bool none = true;
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ // In case of an unreliable {result} we need to ensure that all
+ // {receiver_maps} are stable, because otherwise we cannot trust
+ // the {receiver_maps} information, since arbitrary side-effects
+ // may have happened.
+ if (!receiver_map->is_stable()) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ for (PrototypeIterator j(receiver_map);; j.Advance()) {
+ if (j.IsAtEnd()) {
+ all = false;
+ break;
+ }
+ Handle<JSReceiver> const current =
+ PrototypeIterator::GetCurrent<JSReceiver>(j);
+ if (current.is_identical_to(prototype)) {
+ none = false;
+ break;
+ }
+ if (!current->map()->is_stable() ||
+ current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ }
+ DCHECK_IMPLIES(all, !none);
+ DCHECK_IMPLIES(none, !all);
+
+ if (all) return kIsInPrototypeChain;
+ if (none) return kIsNotInPrototypeChain;
+ return kMayBeInPrototypeChain;
+}
+
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
- MapHandleList* receiver_maps) {
- DCHECK_EQ(0, receiver_maps->length());
+ MapHandles* receiver_maps) {
+ DCHECK_EQ(0, receiver_maps->size());
// See if we can infer a concrete type for the {receiver}.
if (InferReceiverMaps(receiver, effect, receiver_maps)) {
// We can assume that the {receiver} still has the inferred {receiver_maps}.
@@ -2255,11 +2469,12 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
// Try to filter impossible candidates based on inferred root map.
Handle<Map> receiver_map;
if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
- for (int i = receiver_maps->length(); --i >= 0;) {
- if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
- receiver_maps->Remove(i);
- }
- }
+ receiver_maps->erase(
+ std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+ [receiver_map](const Handle<Map>& map) {
+ return map->FindRootMap() != *receiver_map;
+ }),
+ receiver_maps->end());
}
return true;
}
@@ -2267,13 +2482,13 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
}
bool JSNativeContextSpecialization::InferReceiverMaps(
- Node* receiver, Node* effect, MapHandleList* receiver_maps) {
+ Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->Add(maps[i]);
+ receiver_maps->push_back(maps[i]);
}
return true;
} else if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2283,7 +2498,7 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
if (!maps[i]->is_stable()) return false;
}
for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->Add(maps[i]);
+ receiver_maps->push_back(maps[i]);
}
return true;
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index cd1b3349ad..2f9df08f81 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -8,7 +8,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
-#include "src/feedback-vector.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -16,6 +16,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class FeedbackNexus;
namespace compiler {
@@ -67,7 +68,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- MapHandleList const& receiver_maps,
+ MapHandles const& receiver_maps,
AccessMode access_mode,
LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
@@ -82,7 +83,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
LanguageMode language_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
- MapHandleList const& receiver_maps,
+ MapHandles const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
Node* index = nullptr);
@@ -130,7 +131,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Construct an appropriate map check.
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& maps);
+ MapHandles const& maps);
// Construct appropriate subgraph to extend properties backing store.
Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
@@ -138,26 +139,37 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
+ void AssumePrototypesStable(MapHandles const& receiver_maps,
Handle<JSObject> holder);
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
- bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps);
+ bool CanTreatHoleAsUndefined(MapHandles const& receiver_maps);
+
+ // Checks if we know at compile time that the {receiver} either definitely
+ // has the {prototype} in its prototype chain, or the {receiver} definitely
+ // doesn't have the {prototype} in its prototype chain.
+ enum InferHasInPrototypeChainResult {
+ kIsInPrototypeChain,
+ kIsNotInPrototypeChain,
+ kMayBeInPrototypeChain
+ };
+ InferHasInPrototypeChainResult InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<JSReceiver> prototype);
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
bool ExtractReceiverMaps(Node* receiver, Node* effect,
FeedbackNexus const& nexus,
- MapHandleList* receiver_maps);
+ MapHandles* receiver_maps);
// Try to infer maps for the given {receiver} at the current {effect}.
// If maps are returned then you can be sure that the {receiver} definitely
// has one of the returned maps at this point in the program (identified
// by {effect}).
bool InferReceiverMaps(Node* receiver, Node* effect,
- MapHandleList* receiver_maps);
+ MapHandles* receiver_maps);
// Try to infer a root map for the {receiver} independent of the current
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 74156b086d..b8156a23f4 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -17,6 +17,11 @@ namespace v8 {
namespace internal {
namespace compiler {
+std::ostream& operator<<(std::ostream& os, CallFrequency f) {
+ if (f.IsUnknown()) return os << "unknown";
+ return os << f.value();
+}
+
VectorSlotPair::VectorSlotPair() {}
@@ -52,6 +57,17 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op) {
return OpParameter<ToBooleanHints>(op);
}
+std::ostream& operator<<(std::ostream& os,
+ ConstructForwardVarargsParameters const& p) {
+ return os << p.arity() << ", " << p.start_index();
+}
+
+ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConstructForwardVarargs, op->opcode());
+ return OpParameter<ConstructForwardVarargsParameters>(op);
+}
+
bool operator==(ConstructParameters const& lhs,
ConstructParameters const& rhs) {
return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
@@ -113,7 +129,8 @@ const CallParameters& CallParametersOf(const Operator* op) {
std::ostream& operator<<(std::ostream& os,
CallForwardVarargsParameters const& p) {
- return os << p.start_index() << ", " << p.tail_call_mode();
+ return os << p.arity() << ", " << p.start_index() << ", "
+ << p.tail_call_mode();
}
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
@@ -738,16 +755,16 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
}
const Operator* JSOperatorBuilder::CallForwardVarargs(
- uint32_t start_index, TailCallMode tail_call_mode) {
- CallForwardVarargsParameters parameters(start_index, tail_call_mode);
+ size_t arity, uint32_t start_index, TailCallMode tail_call_mode) {
+ CallForwardVarargsParameters parameters(arity, start_index, tail_call_mode);
return new (zone()) Operator1<CallForwardVarargsParameters>( // --
IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties, // opcode
"JSCallForwardVarargs", // name
- 2, 1, 1, 1, 1, 2, // counts
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
-const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
+const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode,
TailCallMode tail_call_mode) {
@@ -793,7 +810,18 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
+const Operator* JSOperatorBuilder::ConstructForwardVarargs(
+ size_t arity, uint32_t start_index) {
+ ConstructForwardVarargsParameters parameters(arity, start_index);
+ return new (zone()) Operator1<ConstructForwardVarargsParameters>( // --
+ IrOpcode::kJSConstructForwardVarargs, Operator::kNoProperties, // opcode
+ "JSConstructForwardVarargs", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+const Operator* JSOperatorBuilder::Construct(uint32_t arity,
+ CallFrequency frequency,
VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
@@ -891,14 +919,19 @@ const Operator* JSOperatorBuilder::StoreNamedOwn(
parameters); // parameter
}
-const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
- return new (zone()) Operator1<LanguageMode>( // --
+const Operator* JSOperatorBuilder::DeleteProperty() {
+ return new (zone()) Operator( // --
IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
"JSDeleteProperty", // name
- 2, 1, 1, 1, 1, 2, // counts
- language_mode); // parameter
+ 3, 1, 1, 1, 1, 2); // counts
}
+const Operator* JSOperatorBuilder::CreateGeneratorObject() {
+ return new (zone()) Operator( // --
+ IrOpcode::kJSCreateGeneratorObject, Operator::kEliminatable, // opcode
+ "JSCreateGeneratorObject", // name
+ 2, 1, 1, 1, 1, 0); // counts
+}
const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index d7b0dfab9b..5ac3b6769e 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -26,6 +26,37 @@ namespace compiler {
class Operator;
struct JSOperatorGlobalCache;
+// Defines the frequency with which a given Call/Construct site has been
+// executed. For some call sites the frequency is not known.
+class CallFrequency final {
+ public:
+ CallFrequency() : value_(std::numeric_limits<float>::quiet_NaN()) {}
+ explicit CallFrequency(float value) : value_(value) {
+ DCHECK(!std::isnan(value));
+ }
+
+ bool IsKnown() const { return !IsUnknown(); }
+ bool IsUnknown() const { return std::isnan(value_); }
+ float value() const {
+ DCHECK(IsKnown());
+ return value_;
+ }
+
+ bool operator==(CallFrequency const& that) const {
+ return bit_cast<uint32_t>(this->value_) == bit_cast<uint32_t>(that.value_);
+ }
+ bool operator!=(CallFrequency const& that) const { return !(*this == that); }
+
+ friend size_t hash_value(CallFrequency f) {
+ return bit_cast<uint32_t>(f.value_);
+ }
+
+ private:
+ float value_;
+};
+
+std::ostream& operator<<(std::ostream&, CallFrequency);
+
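
A short sketch of how a consumer such as the inlining heuristic might use the new CallFrequency type; the helper and threshold below are hypothetical, not part of the patch:

// Sketch: the default-constructed (NaN) state means "unknown" and is treated
// conservatively; only a known, sufficiently large value counts as hot.
bool IsHotCallSite(CallFrequency frequency, float threshold) {
  if (frequency.IsUnknown()) return false;
  return frequency.value() >= threshold;
}
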
// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
// is used to access the type feedback for a certain {Node}.
class V8_EXPORT_PRIVATE VectorSlotPair {
@@ -59,22 +90,56 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
// The ToBooleanHints are used as parameter by JSToBoolean operators.
ToBooleanHints ToBooleanHintsOf(Operator const* op);
+// Defines the parameters for a JavaScript constructor call that forwards
+// varargs. This is used as a parameter by JSConstructForwardVarargs operators.
+class ConstructForwardVarargsParameters final {
+ public:
+ ConstructForwardVarargsParameters(size_t arity, uint32_t start_index)
+ : bit_field_(ArityField::encode(arity) |
+ StartIndexField::encode(start_index)) {}
+
+ size_t arity() const { return ArityField::decode(bit_field_); }
+ uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
+
+ bool operator==(ConstructForwardVarargsParameters const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(ConstructForwardVarargsParameters const& that) const {
+ return !(*this == that);
+ }
+
+ private:
+ friend size_t hash_value(ConstructForwardVarargsParameters const& p) {
+ return p.bit_field_;
+ }
+
+ typedef BitField<size_t, 0, 16> ArityField;
+ typedef BitField<uint32_t, 16, 16> StartIndexField;
+
+ uint32_t const bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&,
+ ConstructForwardVarargsParameters const&);
+
+ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
+ Operator const*) WARN_UNUSED_RESULT;
// Defines the arity and the feedback for a JavaScript constructor call. This is
// used as a parameter by JSConstruct operators.
class ConstructParameters final {
public:
- ConstructParameters(uint32_t arity, float frequency,
+ ConstructParameters(uint32_t arity, CallFrequency frequency,
VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
uint32_t arity() const { return arity_; }
- float frequency() const { return frequency_; }
+ CallFrequency frequency() const { return frequency_; }
VectorSlotPair const& feedback() const { return feedback_; }
private:
uint32_t const arity_;
- float const frequency_;
+ CallFrequency const frequency_;
VectorSlotPair const feedback_;
};
@@ -115,11 +180,13 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const*);
// is used as parameter by JSCallForwardVarargs operators.
class CallForwardVarargsParameters final {
public:
- CallForwardVarargsParameters(uint32_t start_index,
+ CallForwardVarargsParameters(size_t arity, uint32_t start_index,
TailCallMode tail_call_mode)
- : bit_field_(StartIndexField::encode(start_index) |
+ : bit_field_(ArityField::encode(arity) |
+ StartIndexField::encode(start_index) |
TailCallModeField::encode(tail_call_mode)) {}
+ size_t arity() const { return ArityField::decode(bit_field_); }
uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
TailCallMode tail_call_mode() const {
return TailCallModeField::decode(bit_field_);
@@ -137,8 +204,9 @@ class CallForwardVarargsParameters final {
return p.bit_field_;
}
- typedef BitField<uint32_t, 0, 30> StartIndexField;
- typedef BitField<TailCallMode, 31, 1> TailCallModeField;
+ typedef BitField<size_t, 0, 15> ArityField;
+ typedef BitField<uint32_t, 15, 15> StartIndexField;
+ typedef BitField<TailCallMode, 30, 1> TailCallModeField;
uint32_t const bit_field_;
};
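
Note the repacking above: start_index shrinks from 30 bits to 15 so that the new arity field fits alongside it, leaving each field a maximum value of 2^15 - 1 = 32767, with the tail-call mode still in a single bit. A small sketch of that layout, independent of the BitField helpers:

// Sketch: arity in bits 0..14, start_index in bits 15..29, tail-call mode in
// bit 30, mirroring the ArityField/StartIndexField/TailCallModeField split.
uint32_t PackCallForwardVarargs(uint32_t arity, uint32_t start_index,
                                bool tail_call) {
  return (arity & 0x7FFF) | ((start_index & 0x7FFF) << 15) |
         (static_cast<uint32_t>(tail_call) << 30);
}
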
@@ -152,8 +220,9 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
// used as a parameter by JSCall operators.
class CallParameters final {
public:
- CallParameters(size_t arity, float frequency, VectorSlotPair const& feedback,
- TailCallMode tail_call_mode, ConvertReceiverMode convert_mode)
+ CallParameters(size_t arity, CallFrequency frequency,
+ VectorSlotPair const& feedback, TailCallMode tail_call_mode,
+ ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
ConvertReceiverModeField::encode(convert_mode) |
TailCallModeField::encode(tail_call_mode)),
@@ -161,7 +230,7 @@ class CallParameters final {
feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
- float frequency() const { return frequency_; }
+ CallFrequency frequency() const { return frequency_; }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
@@ -187,7 +256,7 @@ class CallParameters final {
typedef BitField<TailCallMode, 31, 1> TailCallModeField;
uint32_t const bit_field_;
- float const frequency_;
+ CallFrequency const frequency_;
VectorSlotPair const feedback_;
};
@@ -633,10 +702,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
- const Operator* CallForwardVarargs(uint32_t start_index,
+ const Operator* CallForwardVarargs(size_t arity, uint32_t start_index,
TailCallMode tail_call_mode);
const Operator* Call(
- size_t arity, float frequency = 0.0f,
+ size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -644,8 +713,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
- const Operator* Construct(uint32_t arity, float frequency,
- VectorSlotPair const& feedback);
+
+ const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
+ const Operator* Construct(uint32_t arity,
+ CallFrequency frequency = CallFrequency(),
+ VectorSlotPair const& feedback = VectorSlotPair());
const Operator* ConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -662,12 +734,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback);
const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
- const Operator* DeleteProperty(LanguageMode language_mode);
+ const Operator* DeleteProperty();
const Operator* HasProperty();
const Operator* GetSuperConstructor();
+ const Operator* CreateGeneratorObject();
+
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 420e68cdf5..64838a1f83 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -488,13 +488,13 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- pointer_comparable_type_(Type::Union(
- Type::Oddball(),
- Type::Union(
- Type::SymbolOrReceiver(),
- Type::HeapConstant(factory()->empty_string(), graph()->zone()),
- graph()->zone()),
- graph()->zone())),
+ empty_string_type_(
+ Type::HeapConstant(factory()->empty_string(), graph()->zone())),
+ pointer_comparable_type_(
+ Type::Union(Type::Oddball(),
+ Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
+ graph()->zone()),
+ graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
@@ -535,6 +535,23 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
}
+ // Eliminate useless concatenation with the empty string.
+ if ((flags() & kDeoptimizationEnabled) &&
+ BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (r.LeftInputIs(empty_string_type_)) {
+ Node* value = effect = graph()->NewNode(simplified()->CheckString(),
+ r.right(), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ } else if (r.RightInputIs(empty_string_type_)) {
+ Node* value = effect = graph()->NewNode(simplified()->CheckString(),
+ r.left(), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
flags = STRING_ADD_CONVERT_LEFT;
@@ -1328,16 +1345,12 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Type* constructor_type = NodeProperties::GetType(constructor);
Node* object = NodeProperties::GetValueInput(node, 1);
Type* object_type = NodeProperties::GetType(object);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
// Check if the {constructor} cannot be callable.
// See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
if (!constructor_type->Maybe(Type::Callable())) {
Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value, effect, control);
+ ReplaceWithValue(node, value);
return Replace(value);
}
@@ -1347,156 +1360,11 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
if (!object_type->Maybe(Type::Receiver()) &&
!constructor_type->Maybe(Type::BoundFunction())) {
Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value, effect, control);
+ ReplaceWithValue(node, value);
return Replace(value);
}
- // Check if the {constructor} is a (known) JSFunction.
- if (!constructor_type->IsHeapConstant() ||
- !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
- return NoChange();
- }
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(constructor_type->AsHeapConstant()->Value());
-
- // Check if the {function} already has an initial map (i.e. the
- // {function} has been used as a constructor at least once).
- if (!function->has_initial_map()) return NoChange();
-
- // Check if the {function}s "prototype" is a JSReceiver.
- if (!function->prototype()->IsJSReceiver()) return NoChange();
-
- // Install a code dependency on the {function}s initial map.
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
-
- Node* prototype =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
-
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* vloop = object = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), object, object, loop);
- // TODO(jarin): This is a very ugly hack to work-around the super-smart
- // implicit typing of the Phi, which goes completely nuts if the {object}
- // is for example a HeapConstant.
- NodeProperties::SetType(vloop, Type::NonInternal());
-
- // Load the {object} map and instance type.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
- effect, control);
- Node* object_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), object_map,
- effect, control);
-
- // Check if the {object} is a special receiver, because for special
- // receivers, i.e. proxies or API objects that need access checks,
- // we have to use the %HasInPrototypeChain runtime function instead.
- Node* check1 = graph()->NewNode(
- simplified()->NumberLessThanOrEqual(), object_instance_type,
- jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- control = graph()->NewNode(common()->IfFalse(), branch1);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
-
- // Check if the {object} is not a receiver at all.
- Node* check10 =
- graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
- jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
- Node* branch10 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
-
- // A primitive value cannot match the {prototype} we're looking for.
- if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
- vtrue1 = jsgraph()->FalseConstant();
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
- Node* efalse1 = etrue1;
- Node* vfalse1;
- {
- // Slow path, need to call the %HasInPrototypeChain runtime function.
- vfalse1 = efalse1 = if_false1 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
- prototype, context, frame_state, efalse1, if_false1);
-
- // Replace any potential {IfException} uses of {node} to catch exceptions
- // from this %HasInPrototypeChain runtime call instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse1);
- NodeProperties::ReplaceEffectInput(on_exception, efalse1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- Revisit(on_exception);
- }
- }
-
- // Load the {object} prototype.
- Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
- effect, control);
-
- // Check if we reached the end of {object}s prototype chain.
- Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, jsgraph()->NullConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = effect;
- Node* vtrue2 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch2);
-
- // Check if we reached the {prototype}.
- Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, prototype);
- Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = effect;
- Node* vtrue3 = jsgraph()->TrueConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch3);
-
- // Close the loop.
- vloop->ReplaceInput(1, object_prototype);
- eloop->ReplaceInput(1, effect);
- loop->ReplaceInput(1, control);
-
- control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
- if_true3, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
- etrue3, efalse1, control);
-
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, vtrue0);
- node->ReplaceInput(1, vtrue1);
- node->ReplaceInput(2, vtrue2);
- node->ReplaceInput(3, vtrue3);
- node->ReplaceInput(4, vfalse1);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(node,
- common()->Phi(MachineRepresentation::kTagged, 5));
- return Changed(node);
+ return NoChange();
}
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
@@ -1542,35 +1410,49 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
return Changed(node);
}
-Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadModule ||
+ node->opcode() == IrOpcode::kJSStoreModule);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
int32_t cell_index = OpParameter<int32_t>(node);
Node* module = NodeProperties::GetValueInput(node, 0);
+ Type* module_type = NodeProperties::GetType(module);
+
+ if (module_type->IsHeapConstant()) {
+ Handle<Module> module_constant =
+ Handle<Module>::cast(module_type->AsHeapConstant()->Value());
+ Handle<Cell> cell_constant(module_constant->GetCell(cell_index), isolate());
+ return jsgraph()->HeapConstant(cell_constant);
+ }
- Node* array;
+ FieldAccess field_access;
int index;
if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
ModuleDescriptor::kExport) {
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
- module, effect, control);
+ field_access = AccessBuilder::ForModuleRegularExports();
index = cell_index - 1;
} else {
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kImport);
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
- module, effect, control);
+ field_access = AccessBuilder::ForModuleRegularImports();
index = -cell_index - 1;
}
-
- Node* cell = effect = graph()->NewNode(
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(field_access),
+ module, effect, control);
+ return graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
effect, control);
+}
+Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* cell = BuildGetModuleCell(node);
+ if (cell->op()->EffectOutputCount() > 0) effect = cell;
Node* value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -1583,32 +1465,12 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreModule, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- int32_t cell_index = OpParameter<int32_t>(node);
- Node* module = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node)),
+ ModuleDescriptor::kExport);
- Node* array;
- int index;
- if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport) {
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
- module, effect, control);
- index = cell_index - 1;
- } else {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
- module, effect, control);
- index = -cell_index - 1;
- }
-
- Node* cell = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
- effect, control);
-
+ Node* cell = BuildGetModuleCell(node);
+ if (cell->op()->EffectOutputCount() > 0) effect = cell;
effect =
graph()->NewNode(simplified()->StoreField(AccessBuilder::ForCellValue()),
cell, value, effect, control);
@@ -1858,6 +1720,38 @@ bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
} // namespace
+Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstructForwardVarargs, node->opcode());
+ ConstructForwardVarargsParameters p =
+ ConstructForwardVarargsParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ int const start_index = static_cast<int>(p.start_index());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
+ Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arity + 1,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
@@ -1936,6 +1830,9 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, node->opcode());
CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ int const start_index = static_cast<int>(p.start_index());
Node* target = NodeProperties::GetValueInput(node, 0);
Type* target_type = NodeProperties::GetType(target);
@@ -1951,11 +1848,12 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(p.start_index()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
- node,
- common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 1, flags)));
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arity + 1,
+ flags)));
return Changed(node);
}
@@ -2331,6 +2229,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSStoreModule(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
+ case IrOpcode::kJSConstructForwardVarargs:
+ return ReduceJSConstructForwardVarargs(node);
case IrOpcode::kJSConstruct:
return ReduceJSConstruct(node);
case IrOpcode::kJSCallForwardVarargs:
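Editor's note: the js-typed-lowering.cc hunks above fold the JSLoadModule/JSStoreModule cell lookup into one BuildGetModuleCell helper, which constant-folds the cell when the module is a heap constant and otherwise maps the cell index onto a slot of the regular-exports or regular-imports FixedArray. The following standalone sketch is illustration only, not part of the patch; it mirrors the index arithmetic above, and the zero-index handling is an assumption of the sketch.

// Simplified, standalone model of the cell-index mapping used by
// BuildGetModuleCell: positive indices address the regular-exports array,
// negative ones the regular-imports array; both become zero-based slots.
#include <cassert>

enum class CellIndexKind { kExport, kImport };

CellIndexKind GetCellIndexKind(int cell_index) {
  // Assumption for this sketch: cell_index is never zero.
  return cell_index > 0 ? CellIndexKind::kExport : CellIndexKind::kImport;
}

int CellIndexToArraySlot(int cell_index) {
  return GetCellIndexKind(cell_index) == CellIndexKind::kExport
             ? cell_index - 1    // slot in the regular exports array
             : -cell_index - 1;  // slot in the regular imports array
}

int main() {
  assert(CellIndexToArraySlot(1) == 0);   // first export
  assert(CellIndexToArraySlot(-2) == 1);  // second import
  return 0;
}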
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 98d71c3ed9..0b92a40a5b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -69,6 +69,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
+ Reduction ReduceJSConstructForwardVarargs(Node* node);
Reduction ReduceJSConstruct(Node* node);
Reduction ReduceJSCallForwardVarargs(Node* node);
Reduction ReduceJSCall(Node* node);
@@ -88,6 +89,9 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceSpeculativeNumberBinop(Node* node);
Reduction ReduceSpeculativeNumberComparison(Node* node);
+ // Helper for ReduceJSLoadModule and ReduceJSStoreModule.
+ Node* BuildGetModuleCell(Node* node);
+
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -101,6 +105,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
CompilationDependencies* dependencies_;
Flags flags_;
JSGraph* jsgraph_;
+ Type* empty_string_type_;
Type* shifted_int32_ranges_[4];
Type* pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 81c90d011f..1275f8f6ff 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -176,9 +176,17 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineFixedArrayGet:
case Runtime::kInlineFixedArraySet:
case Runtime::kInlineGeneratorClose:
+ case Runtime::kInlineGeneratorGetContext:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
+ case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsJSMap:
+ case Runtime::kInlineIsJSSet:
+ case Runtime::kInlineIsJSMapIterator:
+ case Runtime::kInlineIsJSSetIterator:
+ case Runtime::kInlineIsJSWeakMap:
+ case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 2c688a1cb5..b4a5b717e6 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -142,7 +142,7 @@ bool IsCompatibleCheck(Node const* a, Node const* b) {
Node* LoadElimination::AbstractChecks::Lookup(Node* node) const {
for (Node* const check : nodes_) {
- if (check && IsCompatibleCheck(check, node)) {
+ if (check && !check->IsDead() && IsCompatibleCheck(check, node)) {
return check;
}
}
@@ -195,13 +195,23 @@ void LoadElimination::AbstractChecks::Print() const {
}
}
-Node* LoadElimination::AbstractElements::Lookup(Node* object,
- Node* index) const {
+namespace {
+
+bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+ if (r1 == r2) return true;
+ return IsAnyTagged(r1) && IsAnyTagged(r2);
+}
+
+} // namespace
+
+Node* LoadElimination::AbstractElements::Lookup(
+ Node* object, Node* index, MachineRepresentation representation) const {
for (Element const element : elements_) {
if (element.object == nullptr) continue;
DCHECK_NOT_NULL(element.index);
DCHECK_NOT_NULL(element.value);
- if (MustAlias(object, element.object) && MustAlias(index, element.index)) {
+ if (MustAlias(object, element.object) && MustAlias(index, element.index) &&
+ IsCompatible(representation, element.representation)) {
return element.value;
}
}
@@ -470,22 +480,26 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
return this;
}
-Node* LoadElimination::AbstractState::LookupElement(Node* object,
- Node* index) const {
+Node* LoadElimination::AbstractState::LookupElement(
+ Node* object, Node* index, MachineRepresentation representation) const {
if (this->elements_) {
- return this->elements_->Lookup(object, index);
+ return this->elements_->Lookup(object, index, representation);
}
return nullptr;
}
LoadElimination::AbstractState const*
LoadElimination::AbstractState::AddElement(Node* object, Node* index,
- Node* value, Zone* zone) const {
+ Node* value,
+ MachineRepresentation representation,
+ Zone* zone) const {
AbstractState* that = new (zone) AbstractState(*this);
if (that->elements_) {
- that->elements_ = that->elements_->Extend(object, index, value, zone);
+ that->elements_ =
+ that->elements_->Extend(object, index, value, representation, zone);
} else {
- that->elements_ = new (zone) AbstractElements(object, index, value, zone);
+ that->elements_ =
+ new (zone) AbstractElements(object, index, value, representation, zone);
}
return that;
}
@@ -823,7 +837,8 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- if (Node* replacement = state->LookupElement(object, index)) {
+ if (Node* replacement = state->LookupElement(
+ object, index, access.machine_type.representation())) {
// Make sure we don't resurrect dead {replacement} nodes.
if (!replacement->IsDead()) {
// We might need to guard the {replacement} if the type of the
@@ -838,7 +853,8 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
return Replace(replacement);
}
}
- state = state->AddElement(object, index, node, zone());
+ state = state->AddElement(object, index, node,
+ access.machine_type.representation(), zone());
return UpdateState(node, state);
}
return NoChange();
@@ -852,7 +868,8 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- Node* const old_value = state->LookupElement(object, index);
+ Node* const old_value =
+ state->LookupElement(object, index, access.machine_type.representation());
if (old_value == new_value) {
// This store is fully redundant.
return Replace(effect);
@@ -880,7 +897,8 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- state = state->AddElement(object, index, new_value, zone());
+ state = state->AddElement(object, index, new_value,
+ access.machine_type.representation(), zone());
break;
}
return UpdateState(node, state);
@@ -1007,8 +1025,15 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
!ZoneHandleSet<Map>(transition.target())
.contains(object_maps)) {
state = state->KillMaps(object, zone());
- state = state->KillField(
- object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ switch (transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ break;
+ case ElementsTransition::kSlowTransition:
+ // Kill the elements as well.
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ break;
+ }
}
break;
}
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index cd486a2cd7..5d09aa5124 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/machine-type.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -78,19 +79,23 @@ class V8_EXPORT_PRIVATE LoadElimination final
elements_[i] = Element();
}
}
- AbstractElements(Node* object, Node* index, Node* value, Zone* zone)
+ AbstractElements(Node* object, Node* index, Node* value,
+ MachineRepresentation representation, Zone* zone)
: AbstractElements(zone) {
- elements_[next_index_++] = Element(object, index, value);
+ elements_[next_index_++] = Element(object, index, value, representation);
}
AbstractElements const* Extend(Node* object, Node* index, Node* value,
+ MachineRepresentation representation,
Zone* zone) const {
AbstractElements* that = new (zone) AbstractElements(*this);
- that->elements_[that->next_index_] = Element(object, index, value);
+ that->elements_[that->next_index_] =
+ Element(object, index, value, representation);
that->next_index_ = (that->next_index_ + 1) % arraysize(elements_);
return that;
}
- Node* Lookup(Node* object, Node* index) const;
+ Node* Lookup(Node* object, Node* index,
+ MachineRepresentation representation) const;
AbstractElements const* Kill(Node* object, Node* index, Zone* zone) const;
bool Equals(AbstractElements const* that) const;
AbstractElements const* Merge(AbstractElements const* that,
@@ -101,12 +106,17 @@ class V8_EXPORT_PRIVATE LoadElimination final
private:
struct Element {
Element() {}
- Element(Node* object, Node* index, Node* value)
- : object(object), index(index), value(value) {}
+ Element(Node* object, Node* index, Node* value,
+ MachineRepresentation representation)
+ : object(object),
+ index(index),
+ value(value),
+ representation(representation) {}
Node* object = nullptr;
Node* index = nullptr;
Node* value = nullptr;
+ MachineRepresentation representation = MachineRepresentation::kNone;
};
Element elements_[kMaxTrackedElements];
@@ -224,10 +234,12 @@ class V8_EXPORT_PRIVATE LoadElimination final
Node* LookupField(Node* object, size_t index) const;
AbstractState const* AddElement(Node* object, Node* index, Node* value,
+ MachineRepresentation representation,
Zone* zone) const;
AbstractState const* KillElement(Node* object, Node* index,
Zone* zone) const;
- Node* LookupElement(Node* object, Node* index) const;
+ Node* LookupElement(Node* object, Node* index,
+ MachineRepresentation representation) const;
AbstractState const* AddCheck(Node* node, Zone* zone) const;
Node* LookupCheck(Node* node) const;
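Editor's note: the load-elimination changes above make element tracking representation-aware; a cached element value is reused only when the machine representations of the two accesses are compatible, with all tagged flavors forming a single compatibility class. A minimal standalone sketch of that check follows (illustration only; the real MachineRepresentation enum and IsAnyTagged helper live in src/machine-type.h).

#include <cassert>

// Reduced stand-in for v8::internal::MachineRepresentation.
enum class Rep { kWord32, kFloat64, kTaggedSigned, kTaggedPointer, kTagged };

bool IsAnyTagged(Rep r) {
  return r == Rep::kTaggedSigned || r == Rep::kTaggedPointer || r == Rep::kTagged;
}

// Mirrors the IsCompatible() predicate added in load-elimination.cc above.
bool IsCompatible(Rep r1, Rep r2) {
  if (r1 == r2) return true;
  return IsAnyTagged(r1) && IsAnyTagged(r2);
}

int main() {
  assert(IsCompatible(Rep::kTagged, Rep::kTaggedPointer));  // tagged flavors alias
  assert(!IsCompatible(Rep::kWord32, Rep::kFloat64));       // distinct raw reps do not
  return 0;
}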
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2e66b17a9d..96f7dc1a91 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -70,9 +70,25 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
-int StackSlotSizeOf(Operator const* op) {
+bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
+ return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
+}
+
+bool operator!=(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StackSlotRepresentation rep) {
+ return base::hash_combine(rep.size(), rep.alignment());
+}
+
+std::ostream& operator<<(std::ostream& os, StackSlotRepresentation rep) {
+ return os << "(" << rep.size() << " : " << rep.alignment() << ")";
+}
+
+StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
- return OpParameter<int>(op);
+ return OpParameter<StackSlotRepresentation>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -229,19 +245,14 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(F32x4Div, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Min, Operator::kCommutative, 2, 0, 1) \
V(F32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(F32x4MinNum, Operator::kCommutative, 2, 0, 1) \
- V(F32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
- V(F32x4RecipRefine, Operator::kNoProperties, 2, 0, 1) \
- V(F32x4RecipSqrtRefine, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Eq, Operator::kCommutative, 2, 0, 1) \
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
@@ -252,6 +263,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I32x4SConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
V(I32x4Neg, Operator::kNoProperties, 1, 0, 1) \
V(I32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(I32x4MinS, Operator::kCommutative, 2, 0, 1) \
@@ -274,6 +286,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
V(I16x8SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
@@ -426,13 +439,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(16x8, 16) \
V(8x16, 8)
-#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
+ V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
-struct StackSlotOperator : public Operator1<int> {
- explicit StackSlotOperator(int size)
- : Operator1<int>(IrOpcode::kStackSlot,
- Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
- 0, 0, 1, 0, 0, size) {}
+struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
+ explicit StackSlotOperator(int size, int alignment)
+ : Operator1<StackSlotRepresentation>(
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
+ "StackSlot", 0, 0, 0, 1, 0, 0,
+ StackSlotRepresentation(size, alignment)) {}
};
struct MachineOperatorGlobalCache {
@@ -499,12 +514,15 @@ struct MachineOperatorGlobalCache {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Size) \
- struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
- StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {} \
- }; \
- StackSlotOfSize##Size##Operator kStackSlotSize##Size;
- STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
+#define STACKSLOT(Size, Alignment) \
+ struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
+ : public StackSlotOperator { \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
+ : StackSlotOperator(Size, Alignment) {} \
+ }; \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
+ kStackSlotOfSize##Size##OfAlignment##Alignment;
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
#undef STACKSLOT
#define STORE(Type) \
@@ -755,21 +773,23 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
return nullptr;
}
-const Operator* MachineOperatorBuilder::StackSlot(int size) {
+const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
-#define CASE_CACHED_SIZE(Size) \
- case Size: \
- return &cache_.kStackSlotSize##Size;
- switch (size) {
- STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
- default:
- return new (zone_) StackSlotOperator(size);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
}
+
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
+
#undef CASE_CACHED_SIZE
+ return new (zone_) StackSlotOperator(size, alignment);
}
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
- return StackSlot(1 << ElementSizeLog2Of(rep));
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
+ int alignment) {
+ return StackSlot(1 << ElementSizeLog2Of(rep), alignment);
}
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
@@ -1007,16 +1027,29 @@ SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
-// TODO(bbudge) Add Shuffle, DCHECKs based on format.
-#define SIMD_PERMUTE_OPS(format, bits) \
- const Operator* MachineOperatorBuilder::S##format##Swizzle( \
- uint32_t swizzle) { \
- return new (zone_) \
- Operator1<uint32_t>(IrOpcode::kS##format##Swizzle, Operator::kPure, \
- "Swizzle", 2, 0, 0, 1, 0, 0, swizzle); \
- }
-SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
-#undef SIMD_PERMUTE_OPS
+const Operator* MachineOperatorBuilder::S32x4Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(4);
+ memcpy(array, shuffle, 4);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS32x4Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
+
+const Operator* MachineOperatorBuilder::S16x8Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(8);
+ memcpy(array, shuffle, 8);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS16x8Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
+
+const Operator* MachineOperatorBuilder::S8x16Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(16);
+ memcpy(array, shuffle, 16);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS8x16Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
} // namespace compiler
} // namespace internal
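Editor's note: the new S32x4/S16x8/S8x16 Shuffle constructors above copy the caller's lane indices into zone-allocated storage, so each Operator1<uint8_t*> owns an immutable copy that outlives the caller's buffer. A small standalone sketch of that ownership pattern follows (illustration only; FakeZone is a stand-in for the compiler zone, not V8 API).

#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

// Stand-in for the zone: owns every shuffle array for the lifetime of the graph.
struct FakeZone {
  std::vector<std::unique_ptr<uint8_t[]>> blocks;
  uint8_t* NewArray(size_t n) {
    blocks.push_back(std::make_unique<uint8_t[]>(n));
    return blocks.back().get();
  }
};

// Mirrors the pattern of S8x16Shuffle() and friends: copy the caller's
// (possibly stack-allocated) lane indices into zone-owned storage before
// handing the pointer to the operator, so the parameter stays valid later.
uint8_t* CopyShuffle(FakeZone* zone, const uint8_t shuffle[16], size_t lanes) {
  uint8_t* array = zone->NewArray(lanes);
  std::memcpy(array, shuffle, lanes);
  return array;
}

int main() {
  FakeZone zone;
  uint8_t on_stack[16] = {3, 2, 1, 0};
  uint8_t* owned = CopyShuffle(&zone, on_stack, 4);  // S32x4 uses 4 lane bytes
  on_stack[0] = 99;                                  // caller buffer may change...
  assert(owned[0] == 3);                             // ...the operator's copy does not
  return 0;
}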
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 9ffb355362..82d40a09e3 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -93,7 +93,29 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-int StackSlotSizeOf(Operator const* op);
+class StackSlotRepresentation final {
+ public:
+ StackSlotRepresentation(int size, int alignment)
+ : size_(size), alignment_(alignment) {}
+
+ int size() const { return size_; }
+ int alignment() const { return alignment_; }
+
+ private:
+ int size_;
+ int alignment_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation,
+ StackSlotRepresentation);
+bool operator!=(StackSlotRepresentation, StackSlotRepresentation);
+
+size_t hash_value(StackSlotRepresentation);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ StackSlotRepresentation);
+
+StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
@@ -441,19 +463,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4UConvertI32x4();
const Operator* F32x4Abs();
const Operator* F32x4Neg();
- const Operator* F32x4Sqrt();
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
+ const Operator* F32x4AddHoriz();
const Operator* F32x4Sub();
const Operator* F32x4Mul();
const Operator* F32x4Div();
const Operator* F32x4Min();
const Operator* F32x4Max();
- const Operator* F32x4MinNum();
- const Operator* F32x4MaxNum();
- const Operator* F32x4RecipRefine();
- const Operator* F32x4RecipSqrtRefine();
const Operator* F32x4Eq();
const Operator* F32x4Ne();
const Operator* F32x4Lt();
@@ -469,6 +487,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4Shl(int32_t);
const Operator* I32x4ShrS(int32_t);
const Operator* I32x4Add();
+ const Operator* I32x4AddHoriz();
const Operator* I32x4Sub();
const Operator* I32x4Mul();
const Operator* I32x4MinS();
@@ -498,6 +517,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8SConvertI32x4();
const Operator* I16x8Add();
const Operator* I16x8AddSaturateS();
+ const Operator* I16x8AddHoriz();
const Operator* I16x8Sub();
const Operator* I16x8SubSaturateS();
const Operator* I16x8Mul();
@@ -556,15 +576,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Xor();
const Operator* S128Not();
+ const Operator* S32x4Shuffle(uint8_t shuffle[16]);
const Operator* S32x4Select();
- const Operator* S32x4Swizzle(uint32_t);
- const Operator* S32x4Shuffle();
+ const Operator* S16x8Shuffle(uint8_t shuffle[16]);
const Operator* S16x8Select();
- const Operator* S16x8Swizzle(uint32_t);
- const Operator* S16x8Shuffle();
+ const Operator* S8x16Shuffle(uint8_t shuffle[16]);
const Operator* S8x16Select();
- const Operator* S8x16Swizzle(uint32_t);
- const Operator* S8x16Shuffle();
const Operator* S1x4Zero();
const Operator* S1x4And();
@@ -604,8 +621,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
- const Operator* StackSlot(int size);
- const Operator* StackSlot(MachineRepresentation rep);
+ const Operator* StackSlot(int size, int alignment = 0);
+ const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
// Access to the machine stack.
const Operator* LoadStackPointer();
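Editor's note: StackSlot now carries a (size, alignment) pair instead of a bare size, and machine-operator.cc serves the common pairs from a precomputed cache while allocating anything else in the zone. The following standalone sketch shows that split with simplified stand-ins (illustration only, not V8 API).

#include <cassert>

// Stand-in for StackSlotRepresentation: a comparable (size, alignment) pair.
struct SlotRep {
  int size;
  int alignment;
};

bool operator==(SlotRep a, SlotRep b) {
  return a.size == b.size && a.alignment == b.alignment;
}

// Pre-built entries for the common cases, mirroring
// STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST above.
const SlotRep kCached[] = {{4, 0}, {8, 0}, {16, 0}, {4, 4}, {8, 8}, {16, 16}};

const SlotRep* LookupCached(int size, int alignment) {
  for (const SlotRep& rep : kCached) {
    if (rep == SlotRep{size, alignment}) return &rep;  // shared, cached operator
  }
  return nullptr;  // caller falls back to allocating a fresh operator
}

int main() {
  assert(LookupCached(8, 8) != nullptr);   // common pair: served from the cache
  assert(LookupCached(12, 4) == nullptr);  // uncommon pair: allocate a new one
  return 0;
}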
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 628c79025e..5055735ba6 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -622,8 +622,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
- __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
- __ Call(at);
+ __ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -640,8 +639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
- __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
- __ Jump(at);
+ __ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -777,8 +775,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
- Operand(offset.offset()));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+ // Verify that the output register is properly aligned.
+ __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
+
+ if (alignment == 2 * kPointerSize) {
+ Label done;
+ __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Addu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kPointerSize) {
+ Label done;
+ __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Subu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Addu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
break;
}
case kIeee754Float64Acos:
@@ -1760,13 +1785,319 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsS32x4Select: {
+ case kMipsS32x4Select:
+ case kMipsS16x8Select:
+ case kMipsS8x16Select: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
+ case kMipsF32x4Abs: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMipsF32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMipsF32x4RecipApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsF32x4RecipSqrtApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsF32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Max: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Min: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Lt: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Le: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4SConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4UConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMipsI16x8ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMipsI16x8ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMipsI16x8Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8AddSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8SubSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMipsI16x8LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8AddSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8SubSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMipsI8x16ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMipsI8x16ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMipsI8x16Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMipsI8x16ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
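Editor's note: the kArchStackSlot hunk above pads over-aligned stack slots at code-generation time: when the requested alignment exceeds pointer size and the slot address is misaligned, the emitted code rounds the address up inside the extra space reserved by the instruction selector. A standalone arithmetic sketch of that fix-up follows (illustration only; the real code works on registers via kScratchReg/kScratchReg2).

#include <cassert>
#include <cstdint>

// Simplified model of the alignment fix-up: round the slot address up to the
// requested power-of-two alignment if it is not already aligned.
uintptr_t AlignStackSlot(uintptr_t addr, int alignment) {
  if (alignment <= 1) return addr;                  // no extra alignment requested
  uintptr_t misalignment = addr & (alignment - 1);  // low bits of the address
  if (misalignment == 0) return addr;               // already aligned
  return addr + (alignment - misalignment);         // bump into the padding
}

int main() {
  assert(AlignStackSlot(0x1004, 8) == 0x1008);
  assert(AlignStackSlot(0x1008, 8) == 0x1008);
  assert(AlignStackSlot(0x1004, 16) == 0x1010);
  return 0;
}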
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 7d0e755617..f80fae9340 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -154,7 +154,59 @@ namespace compiler {
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
- V(MipsS32x4Select)
+ V(MipsS32x4Select) \
+ V(MipsF32x4Abs) \
+ V(MipsF32x4Neg) \
+ V(MipsF32x4RecipApprox) \
+ V(MipsF32x4RecipSqrtApprox) \
+ V(MipsF32x4Add) \
+ V(MipsF32x4Sub) \
+ V(MipsF32x4Mul) \
+ V(MipsF32x4Max) \
+ V(MipsF32x4Min) \
+ V(MipsF32x4Eq) \
+ V(MipsF32x4Ne) \
+ V(MipsF32x4Lt) \
+ V(MipsF32x4Le) \
+ V(MipsI32x4SConvertF32x4) \
+ V(MipsI32x4UConvertF32x4) \
+ V(MipsI32x4Neg) \
+ V(MipsI32x4LtS) \
+ V(MipsI32x4LeS) \
+ V(MipsI32x4LtU) \
+ V(MipsI32x4LeU) \
+ V(MipsI16x8Splat) \
+ V(MipsI16x8ExtractLane) \
+ V(MipsI16x8ReplaceLane) \
+ V(MipsI16x8Neg) \
+ V(MipsI16x8Shl) \
+ V(MipsI16x8ShrS) \
+ V(MipsI16x8ShrU) \
+ V(MipsI16x8Add) \
+ V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8Sub) \
+ V(MipsI16x8SubSaturateS) \
+ V(MipsI16x8Mul) \
+ V(MipsI16x8MaxS) \
+ V(MipsI16x8MinS) \
+ V(MipsI16x8Eq) \
+ V(MipsI16x8Ne) \
+ V(MipsI16x8LtS) \
+ V(MipsI16x8LeS) \
+ V(MipsI16x8AddSaturateU) \
+ V(MipsI16x8SubSaturateU) \
+ V(MipsI16x8MaxU) \
+ V(MipsI16x8MinU) \
+ V(MipsI16x8LtU) \
+ V(MipsI16x8LeU) \
+ V(MipsI8x16Splat) \
+ V(MipsI8x16ExtractLane) \
+ V(MipsI8x16ReplaceLane) \
+ V(MipsI8x16Neg) \
+ V(MipsI8x16Shl) \
+ V(MipsI8x16ShrS) \
+ V(MipsS16x8Select) \
+ V(MipsS8x16Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index c99be67dd7..1058833a43 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -256,6 +256,16 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -2035,6 +2045,214 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMipsS32x4Select, node);
}
+void InstructionSelector::VisitF32x4Abs(Node* node) {
+ VisitRR(this, kMipsF32x4Abs, node);
+}
+
+void InstructionSelector::VisitF32x4Neg(Node* node) {
+ VisitRR(this, kMipsF32x4Neg, node);
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
+ VisitRR(this, kMipsF32x4RecipApprox, node);
+}
+
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ VisitRR(this, kMipsF32x4RecipSqrtApprox, node);
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) {
+ VisitRRR(this, kMipsF32x4Add, node);
+}
+
+void InstructionSelector::VisitF32x4Sub(Node* node) {
+ VisitRRR(this, kMipsF32x4Sub, node);
+}
+
+void InstructionSelector::VisitF32x4Mul(Node* node) {
+ VisitRRR(this, kMipsF32x4Mul, node);
+}
+
+void InstructionSelector::VisitF32x4Max(Node* node) {
+ VisitRRR(this, kMipsF32x4Max, node);
+}
+
+void InstructionSelector::VisitF32x4Min(Node* node) {
+ VisitRRR(this, kMipsF32x4Min, node);
+}
+
+void InstructionSelector::VisitF32x4Eq(Node* node) {
+ VisitRRR(this, kMipsF32x4Eq, node);
+}
+
+void InstructionSelector::VisitF32x4Ne(Node* node) {
+ VisitRRR(this, kMipsF32x4Ne, node);
+}
+
+void InstructionSelector::VisitF32x4Lt(Node* node) {
+ VisitRRR(this, kMipsF32x4Lt, node);
+}
+
+void InstructionSelector::VisitF32x4Le(Node* node) {
+ VisitRRR(this, kMipsF32x4Le, node);
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ VisitRR(this, kMipsI32x4SConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ VisitRR(this, kMipsI32x4UConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Neg(Node* node) {
+ VisitRR(this, kMipsI32x4Neg, node);
+}
+
+void InstructionSelector::VisitI32x4LtS(Node* node) {
+ VisitRRR(this, kMipsI32x4LtS, node);
+}
+
+void InstructionSelector::VisitI32x4LeS(Node* node) {
+ VisitRRR(this, kMipsI32x4LeS, node);
+}
+
+void InstructionSelector::VisitI32x4LtU(Node* node) {
+ VisitRRR(this, kMipsI32x4LtU, node);
+}
+
+void InstructionSelector::VisitI32x4LeU(Node* node) {
+ VisitRRR(this, kMipsI32x4LeU, node);
+}
+
+void InstructionSelector::VisitI16x8Splat(Node* node) {
+ VisitRR(this, kMipsI16x8Splat, node);
+}
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
+ VisitRRI(this, kMipsI16x8ExtractLane, node);
+}
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsI16x8ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI16x8Neg(Node* node) {
+ VisitRR(this, kMipsI16x8Neg, node);
+}
+
+void InstructionSelector::VisitI16x8Shl(Node* node) {
+ VisitRRI(this, kMipsI16x8Shl, node);
+}
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) {
+ VisitRRI(this, kMipsI16x8ShrS, node);
+}
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) {
+ VisitRRI(this, kMipsI16x8ShrU, node);
+}
+
+void InstructionSelector::VisitI16x8Add(Node* node) {
+ VisitRRR(this, kMipsI16x8Add, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ VisitRRR(this, kMipsI16x8AddSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) {
+ VisitRRR(this, kMipsI16x8Sub, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ VisitRRR(this, kMipsI16x8SubSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) {
+ VisitRRR(this, kMipsI16x8Mul, node);
+}
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) {
+ VisitRRR(this, kMipsI16x8MaxS, node);
+}
+
+void InstructionSelector::VisitI16x8MinS(Node* node) {
+ VisitRRR(this, kMipsI16x8MinS, node);
+}
+
+void InstructionSelector::VisitI16x8Eq(Node* node) {
+ VisitRRR(this, kMipsI16x8Eq, node);
+}
+
+void InstructionSelector::VisitI16x8Ne(Node* node) {
+ VisitRRR(this, kMipsI16x8Ne, node);
+}
+
+void InstructionSelector::VisitI16x8LtS(Node* node) {
+ VisitRRR(this, kMipsI16x8LtS, node);
+}
+
+void InstructionSelector::VisitI16x8LeS(Node* node) {
+ VisitRRR(this, kMipsI16x8LeS, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ VisitRRR(this, kMipsI16x8AddSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ VisitRRR(this, kMipsI16x8SubSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) {
+ VisitRRR(this, kMipsI16x8MaxU, node);
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) {
+ VisitRRR(this, kMipsI16x8MinU, node);
+}
+
+void InstructionSelector::VisitI16x8LtU(Node* node) {
+ VisitRRR(this, kMipsI16x8LtU, node);
+}
+
+void InstructionSelector::VisitI16x8LeU(Node* node) {
+ VisitRRR(this, kMipsI16x8LeU, node);
+}
+
+void InstructionSelector::VisitI8x16Splat(Node* node) {
+ VisitRR(this, kMipsI8x16Splat, node);
+}
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
+ VisitRRI(this, kMipsI8x16ExtractLane, node);
+}
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsI8x16ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI8x16Neg(Node* node) {
+ VisitRR(this, kMipsI8x16Neg, node);
+}
+
+void InstructionSelector::VisitI8x16Shl(Node* node) {
+ VisitRRI(this, kMipsI8x16Shl, node);
+}
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ VisitRRI(this, kMipsI8x16ShrS, node);
+}
+
+void InstructionSelector::VisitS16x8Select(Node* node) {
+ VisitRRRR(this, kMipsS16x8Select, node);
+}
+
+void InstructionSelector::VisitS8x16Select(Node* node) {
+ VisitRRRR(this, kMipsS8x16Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index f1831adf63..f4fb71d989 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -583,8 +583,8 @@ void CodeGenerator::AssembleDeconstructFrame() {
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
frame_access_state()->SetFrameAccessToSP();
}
@@ -597,14 +597,14 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
- __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Branch(&done, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
Register caller_args_count_reg = scratch1;
- __ ld(caller_args_count_reg,
+ __ Ld(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
@@ -696,10 +696,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -709,13 +709,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
- __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,7 +784,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchParentFramePointer:
if (frame_access_state()->has_frame()) {
- __ ld(i.OutputRegister(), MemOperand(fp, 0));
+ __ Ld(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
}
@@ -803,7 +803,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Daddu(at, object, index);
- __ sd(value, MemOperand(at));
+ __ Sd(value, MemOperand(at));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -813,8 +813,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
- Operand(offset.offset()));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+ // Verify that the output register is properly aligned.
+ __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
+ if (alignment == 2 * kPointerSize) {
+ Label done;
+ __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Daddu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kPointerSize) {
+ Label done;
+ __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Daddu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
+
break;
}
case kIeee754Float64Acos:
@@ -1216,19 +1243,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMips64Dext: {
- int16_t pos = i.InputInt8(1);
- int16_t size = i.InputInt8(2);
- if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
- __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- } else if (size > 32 && size <= 64 && pos >= 0 && pos < 32) {
- __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- } else {
- DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
- __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- }
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
break;
}
case kMips64Dins:
@@ -1712,64 +1728,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ seh(i.OutputRegister(), i.InputRegister(0));
break;
case kMips64Lbu:
- __ lbu(i.OutputRegister(), i.MemoryOperand());
+ __ Lbu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lb:
- __ lb(i.OutputRegister(), i.MemoryOperand());
+ __ Lb(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sb:
- __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
- __ lhu(i.OutputRegister(), i.MemoryOperand());
+ __ Lhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lh:
- __ lh(i.OutputRegister(), i.MemoryOperand());
+ __ Lh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sh:
- __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Ush:
__ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMips64Lw:
- __ lw(i.OutputRegister(), i.MemoryOperand());
+ __ Lw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lwu:
- __ lwu(i.OutputRegister(), i.MemoryOperand());
+ __ Lwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ld:
- __ ld(i.OutputRegister(), i.MemoryOperand());
+ __ Ld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sw:
- __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usw:
__ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Sd:
- __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usd:
__ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lwc1: {
- __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+ __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
case kMips64Ulwc1: {
@@ -1783,7 +1799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ swc1(ft, operand);
+ __ Swc1(ft, operand);
break;
}
case kMips64Uswc1: {
@@ -1797,7 +1813,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64Ldc1:
- __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMips64Uldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1807,7 +1823,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ sdc1(ft, i.MemoryOperand());
+ __ Sdc1(ft, i.MemoryOperand());
break;
}
case kMips64Usdc1: {
@@ -1820,7 +1836,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1835,9 +1851,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64StoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
- __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
@@ -2090,13 +2106,319 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64S32x4Select: {
+ case kMips64S32x4Select:
+ case kMips64S16x8Select:
+ case kMips64S8x16Select: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
+ case kMips64F32x4Abs: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMips64F32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMips64F32x4RecipApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64F32x4RecipSqrtApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64F32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Max: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Min: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Lt: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Le: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4SConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4UConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I16x8ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I16x8ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMips64I16x8Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I16x8Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8AddSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8SubSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
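A scalar model of the ceq_h + nor_v pair above: ceq_h sets a lane to all ones when the operands are equal and to all zeros otherwise, and nor of the result with itself inverts every bit, leaving all ones exactly in the lanes that differ. Illustrative only:

    #include <cstdint>

    uint16_t LaneNotEqual(uint16_t a, uint16_t b) {
      uint16_t eq = (a == b) ? 0xFFFFu : 0x0000u;   // ceq_h, per 16-bit lane
      return static_cast<uint16_t>(~(eq | eq));     // nor_v(dst, dst, dst)
    }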
+ case kMips64I16x8LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8AddSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8SubSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I8x16ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I8x16ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMips64I8x16Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMips64I8x16ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
@@ -2331,7 +2653,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
- __ ExtractBits(result, i.InputRegister(0), pos, 1);
+ __ Dext(result, i.InputRegister(0), pos, 1);
} else {
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
__ Sltu(result, zero_reg, kScratchReg);
@@ -2657,17 +2979,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ mov(g.ToRegister(destination), src);
} else {
- __ sd(src, g.ToMemOperand(destination));
+ __ Sd(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
- __ ld(g.ToRegister(destination), src);
+ __ Ld(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
- __ ld(temp, src);
- __ sd(temp, g.ToMemOperand(destination));
+ __ Ld(temp, src);
+ __ Sd(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
@@ -2713,15 +3035,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
}
- if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+ if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ sw(zero_reg, dst);
+ __ Sw(zero_reg, dst);
} else {
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ __ Sw(at, dst);
}
} else {
DCHECK(destination->IsFPRegister());
@@ -2735,7 +3057,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
if (destination->IsFPStackSlot()) {
- __ sdc1(dst, g.ToMemOperand(destination));
+ __ Sdc1(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
@@ -2745,17 +3067,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ sdc1(src, g.ToMemOperand(destination));
+ __ Sdc1(src, g.ToMemOperand(destination));
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsFPRegister()) {
- __ ldc1(g.ToDoubleRegister(destination), src);
+ __ Ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
- __ ldc1(temp, src);
- __ sdc1(temp, g.ToMemOperand(destination));
+ __ Ldc1(temp, src);
+ __ Sdc1(temp, g.ToMemOperand(destination));
}
} else {
UNREACHABLE();
@@ -2781,8 +3103,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(temp, src);
- __ ld(src, dst);
- __ sd(temp, dst);
+ __ Ld(src, dst);
+ __ Sd(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -2790,10 +3112,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
- __ ld(temp_0, src);
- __ ld(temp_1, dst);
- __ sd(temp_0, dst);
- __ sd(temp_1, src);
+ __ Ld(temp_0, src);
+ __ Ld(temp_1, dst);
+ __ Sd(temp_0, dst);
+ __ Sd(temp_1, src);
} else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
@@ -2806,8 +3128,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
- __ ldc1(src, dst);
- __ sdc1(temp, dst);
+ __ Ldc1(src, dst);
+ __ Sdc1(temp, dst);
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
@@ -2817,12 +3139,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ ldc1(temp_1, dst0); // Save destination in temp_1.
- __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ sw(temp_0, dst0);
- __ lw(temp_0, src1);
- __ sw(temp_0, dst1);
- __ sdc1(temp_1, src0);
+ __ Ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Sdc1(temp_1, src0);
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 5d22bc1eba..02cd4d5852 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -188,7 +188,59 @@ namespace compiler {
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
- V(Mips64S32x4Select)
+ V(Mips64S32x4Select) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4LtS) \
+ V(Mips64I32x4LeS) \
+ V(Mips64I32x4LtU) \
+ V(Mips64I32x4LeU) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLane) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8LtS) \
+ V(Mips64I16x8LeS) \
+ V(Mips64I16x8AddSaturateU) \
+ V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8LtU) \
+ V(Mips64I16x8LeU) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLane) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64S16x8Select) \
+ V(Mips64S8x16Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 4e5c4e847e..b4664d036a 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -352,6 +352,17 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}
+
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
Node* output = nullptr) {
Mips64OperandGenerator g(selector);
@@ -2786,6 +2797,214 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMips64S32x4Select, node);
}
+void InstructionSelector::VisitF32x4Abs(Node* node) {
+ VisitRR(this, kMips64F32x4Abs, node);
+}
+
+void InstructionSelector::VisitF32x4Neg(Node* node) {
+ VisitRR(this, kMips64F32x4Neg, node);
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
+ VisitRR(this, kMips64F32x4RecipApprox, node);
+}
+
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ VisitRR(this, kMips64F32x4RecipSqrtApprox, node);
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) {
+ VisitRRR(this, kMips64F32x4Add, node);
+}
+
+void InstructionSelector::VisitF32x4Sub(Node* node) {
+ VisitRRR(this, kMips64F32x4Sub, node);
+}
+
+void InstructionSelector::VisitF32x4Mul(Node* node) {
+ VisitRRR(this, kMips64F32x4Mul, node);
+}
+
+void InstructionSelector::VisitF32x4Max(Node* node) {
+ VisitRRR(this, kMips64F32x4Max, node);
+}
+
+void InstructionSelector::VisitF32x4Min(Node* node) {
+ VisitRRR(this, kMips64F32x4Min, node);
+}
+
+void InstructionSelector::VisitF32x4Eq(Node* node) {
+ VisitRRR(this, kMips64F32x4Eq, node);
+}
+
+void InstructionSelector::VisitF32x4Ne(Node* node) {
+ VisitRRR(this, kMips64F32x4Ne, node);
+}
+
+void InstructionSelector::VisitF32x4Lt(Node* node) {
+ VisitRRR(this, kMips64F32x4Lt, node);
+}
+
+void InstructionSelector::VisitF32x4Le(Node* node) {
+ VisitRRR(this, kMips64F32x4Le, node);
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ VisitRR(this, kMips64I32x4SConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ VisitRR(this, kMips64I32x4UConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Neg(Node* node) {
+ VisitRR(this, kMips64I32x4Neg, node);
+}
+
+void InstructionSelector::VisitI32x4LtS(Node* node) {
+ VisitRRR(this, kMips64I32x4LtS, node);
+}
+
+void InstructionSelector::VisitI32x4LeS(Node* node) {
+ VisitRRR(this, kMips64I32x4LeS, node);
+}
+
+void InstructionSelector::VisitI32x4LtU(Node* node) {
+ VisitRRR(this, kMips64I32x4LtU, node);
+}
+
+void InstructionSelector::VisitI32x4LeU(Node* node) {
+ VisitRRR(this, kMips64I32x4LeU, node);
+}
+
+void InstructionSelector::VisitI16x8Splat(Node* node) {
+ VisitRR(this, kMips64I16x8Splat, node);
+}
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
+ VisitRRI(this, kMips64I16x8ExtractLane, node);
+}
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64I16x8ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI16x8Neg(Node* node) {
+ VisitRR(this, kMips64I16x8Neg, node);
+}
+
+void InstructionSelector::VisitI16x8Shl(Node* node) {
+ VisitRRI(this, kMips64I16x8Shl, node);
+}
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) {
+ VisitRRI(this, kMips64I16x8ShrS, node);
+}
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) {
+ VisitRRI(this, kMips64I16x8ShrU, node);
+}
+
+void InstructionSelector::VisitI16x8Add(Node* node) {
+ VisitRRR(this, kMips64I16x8Add, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ VisitRRR(this, kMips64I16x8AddSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) {
+ VisitRRR(this, kMips64I16x8Sub, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ VisitRRR(this, kMips64I16x8SubSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) {
+ VisitRRR(this, kMips64I16x8Mul, node);
+}
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) {
+ VisitRRR(this, kMips64I16x8MaxS, node);
+}
+
+void InstructionSelector::VisitI16x8MinS(Node* node) {
+ VisitRRR(this, kMips64I16x8MinS, node);
+}
+
+void InstructionSelector::VisitI16x8Eq(Node* node) {
+ VisitRRR(this, kMips64I16x8Eq, node);
+}
+
+void InstructionSelector::VisitI16x8Ne(Node* node) {
+ VisitRRR(this, kMips64I16x8Ne, node);
+}
+
+void InstructionSelector::VisitI16x8LtS(Node* node) {
+ VisitRRR(this, kMips64I16x8LtS, node);
+}
+
+void InstructionSelector::VisitI16x8LeS(Node* node) {
+ VisitRRR(this, kMips64I16x8LeS, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ VisitRRR(this, kMips64I16x8AddSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ VisitRRR(this, kMips64I16x8SubSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) {
+ VisitRRR(this, kMips64I16x8MaxU, node);
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) {
+ VisitRRR(this, kMips64I16x8MinU, node);
+}
+
+void InstructionSelector::VisitI16x8LtU(Node* node) {
+ VisitRRR(this, kMips64I16x8LtU, node);
+}
+
+void InstructionSelector::VisitI16x8LeU(Node* node) {
+ VisitRRR(this, kMips64I16x8LeU, node);
+}
+
+void InstructionSelector::VisitI8x16Splat(Node* node) {
+ VisitRR(this, kMips64I8x16Splat, node);
+}
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
+ VisitRRI(this, kMips64I8x16ExtractLane, node);
+}
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64I8x16ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI8x16Neg(Node* node) {
+ VisitRR(this, kMips64I8x16Neg, node);
+}
+
+void InstructionSelector::VisitI8x16Shl(Node* node) {
+ VisitRRI(this, kMips64I8x16Shl, node);
+}
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ VisitRRI(this, kMips64I8x16ShrS, node);
+}
+
+void InstructionSelector::VisitS16x8Select(Node* node) {
+ VisitRRRR(this, kMips64S16x8Select, node);
+}
+
+void InstructionSelector::VisitS8x16Select(Node* node) {
+ VisitRRRR(this, kMips64S8x16Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index a45f7f7a79..452840c1c1 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -138,6 +138,18 @@ bool NodeProperties::IsExceptionalCall(Node* node, Node** out_exception) {
return false;
}
+// static
+Node* NodeProperties::FindSuccessfulControlProjection(Node* node) {
+ DCHECK_GT(node->op()->ControlOutputCount(), 0);
+ if (node->op()->HasProperty(Operator::kNoThrow)) return node;
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) continue;
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ return edge.from();
+ }
+ }
+ return node;
+}
// static
void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
@@ -404,6 +416,13 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
// These never change the map of objects.
break;
}
+ case IrOpcode::kFinishRegion: {
+ // FinishRegion renames the output of allocations, so we need
+ // to update the {receiver} that we are looking for, if the
+ // {receiver} matches the current {effect}.
+ if (IsSame(receiver, effect)) receiver = GetValueInput(effect, 0);
+ break;
+ }
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
@@ -418,37 +437,18 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
break;
}
}
+
+ // Stop walking the effect chain once we hit the definition of
+ // the {receiver} along the {effect}s.
+ if (IsSame(receiver, effect)) return kNoReceiverMaps;
+
+ // Continue with the next {effect}.
DCHECK_EQ(1, effect->op()->EffectInputCount());
effect = NodeProperties::GetEffectInput(effect);
}
}
// static
-MaybeHandle<Context> NodeProperties::GetSpecializationContext(
- Node* node, MaybeHandle<Context> context) {
- switch (node->opcode()) {
- case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
- case IrOpcode::kParameter: {
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- return context;
- }
- break;
- }
- default:
- break;
- }
- return MaybeHandle<Context>();
-}
-
-
-// static
Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
Node* context = NodeProperties::GetContextInput(node);
while (*depth > 0 &&
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index aa35ea84e0..02ab2ce044 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -79,6 +79,10 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// the present IfException projection is returned via {out_exception}.
static bool IsExceptionalCall(Node* node, Node** out_exception = nullptr);
+ // Returns the node producing the successful control output of {node}. This is
+ // the IfSuccess projection of {node} if present and {node} itself otherwise.
+ static Node* FindSuccessfulControlProjection(Node* node);
+
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
@@ -142,12 +146,6 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// ---------------------------------------------------------------------------
// Context.
- // Try to retrieve the specialization context from the given {node},
- // optionally utilizing the knowledge about the (outermost) function
- // {context}.
- static MaybeHandle<Context> GetSpecializationContext(
- Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
-
// Walk up the context chain from the given {node} until we reduce the {depth}
// to 0 or hit a node that does not extend the context chain ({depth} will be
// updated accordingly).
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 18736a1f56..ce152b1512 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -66,7 +66,6 @@
V(Call) \
V(Parameter) \
V(OsrValue) \
- V(OsrGuard) \
V(LoopExit) \
V(LoopExitValue) \
V(LoopExitEffect) \
@@ -147,6 +146,7 @@
V(JSStoreDataPropertyInLiteral) \
V(JSDeleteProperty) \
V(JSHasProperty) \
+ V(JSCreateGeneratorObject) \
V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
@@ -159,6 +159,7 @@
V(JSCreateScriptContext)
#define JS_OTHER_OP_LIST(V) \
+ V(JSConstructForwardVarargs) \
V(JSConstruct) \
V(JSConstructWithSpread) \
V(JSCallForwardVarargs) \
@@ -575,19 +576,14 @@
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
- V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4MinNum) \
- V(F32x4MaxNum) \
- V(F32x4RecipRefine) \
- V(F32x4RecipSqrtRefine) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
@@ -604,6 +600,7 @@
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -635,6 +632,7 @@
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -691,19 +689,16 @@
V(S128Load) \
V(S128Store) \
V(S128Zero) \
+ V(S128Not) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
- V(S128Not) \
- V(S32x4Select) \
- V(S32x4Swizzle) \
V(S32x4Shuffle) \
- V(S16x8Select) \
- V(S16x8Swizzle) \
+ V(S32x4Select) \
V(S16x8Shuffle) \
- V(S8x16Select) \
- V(S8x16Swizzle) \
+ V(S16x8Select) \
V(S8x16Shuffle) \
+ V(S8x16Select) \
V(S1x4Zero) \
V(S1x4And) \
V(S1x4Or) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 0c0a3d803a..35b24d8531 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -34,6 +34,10 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStrictEqual:
return false;
+ // Generator creation cannot call back into arbitrary JavaScript.
+ case IrOpcode::kJSCreateGeneratorObject:
+ return false;
+
// Binary operations
case IrOpcode::kJSAdd:
case IrOpcode::kJSSubtract:
@@ -92,6 +96,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToString:
// Call operations
+ case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSCallForwardVarargs:
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index 4f746e2944..e43cd5cdb0 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -14,7 +14,11 @@ namespace {
template <typename N>
V8_INLINE N CheckRange(size_t val) {
- CHECK_LE(val, std::numeric_limits<N>::max());
+ // The getters on Operator for input and output counts currently return int.
+ // Thus check that the given value fits in the integer range.
+ // TODO(titzer): Remove this check once the getters return size_t.
+ CHECK_LE(val, std::min(static_cast<size_t>(std::numeric_limits<N>::max()),
+ static_cast<size_t>(kMaxInt)));
return static_cast<N>(val);
}
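A small standalone illustration of the overflow the added kMaxInt clamp guards against, assuming N = uint32_t and kMaxInt = std::numeric_limits<int>::max(): a count can fit the unsigned template parameter yet exceed the int range used by the getters.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <limits>

    int main() {
      const size_t val = std::numeric_limits<uint32_t>::max();       // 4294967295
      const size_t n_max = std::numeric_limits<uint32_t>::max();
      const size_t int_max = std::numeric_limits<int>::max();        // 2147483647
      std::cout << (val <= n_max) << "\n";                     // 1: old check passes
      std::cout << (val <= std::min(n_max, int_max)) << "\n";  // 0: clamped check rejects
      return 0;
    }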
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index ebf2c421b5..2de3df6354 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -94,8 +94,7 @@ void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
continue;
}
if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
- orig->opcode() == IrOpcode::kOsrValue ||
- orig->opcode() == IrOpcode::kOsrGuard) {
+ orig->opcode() == IrOpcode::kOsrValue) {
// No need to copy leaf nodes or parameters.
mapping->at(orig->id()) = orig;
continue;
@@ -254,20 +253,6 @@ void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
}
-void SetTypeForOsrValue(Node* osr_value, Node* loop,
- CommonOperatorBuilder* common) {
- Node* osr_guard = nullptr;
- for (Node* use : osr_value->uses()) {
- if (use->opcode() == IrOpcode::kOsrGuard) {
- DCHECK_EQ(use->InputAt(0), osr_value);
- osr_guard = use;
- break;
- }
- }
-
- NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
-}
-
} // namespace
void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
@@ -297,12 +282,6 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
CHECK(osr_loop); // Should have found the OSR loop.
- for (Node* use : osr_loop_entry->uses()) {
- if (use->opcode() == IrOpcode::kOsrValue) {
- SetTypeForOsrValue(use, osr_loop, common);
- }
- }
-
// Analyze the graph to determine how deeply nested the OSR loop is.
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 585923fa69..bc8fd0cbe9 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -174,6 +174,8 @@ class PipelineData {
}
~PipelineData() {
+ delete code_generator_; // Must happen before zones are destroyed.
+ code_generator_ = nullptr;
DeleteRegisterAllocationZone();
DeleteInstructionZone();
DeleteGraphZone();
@@ -196,6 +198,8 @@ class PipelineData {
code_ = code;
}
+ CodeGenerator* code_generator() const { return code_generator_; }
+
// RawMachineAssembler generally produces graphs which cannot be verified.
bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }
@@ -314,6 +318,11 @@ class PipelineData {
sequence(), debug_name());
}
+ void InitializeCodeGenerator(Linkage* linkage) {
+ DCHECK_NULL(code_generator_);
+ code_generator_ = new CodeGenerator(frame(), linkage, sequence(), info());
+ }
+
void BeginPhaseKind(const char* phase_kind_name) {
if (pipeline_statistics() != nullptr) {
pipeline_statistics()->BeginPhaseKind(phase_kind_name);
@@ -339,6 +348,7 @@ class PipelineData {
bool verify_graph_ = false;
bool is_asm_ = false;
Handle<Code> code_ = Handle<Code>::null();
+ CodeGenerator* code_generator_ = nullptr;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@@ -356,8 +366,7 @@ class PipelineData {
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
- // is
- // destroyed.
+ // is destroyed.
ZoneStats::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_ = nullptr;
@@ -400,8 +409,11 @@ class PipelineImpl final {
// Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Perform the actual code generation and return handle to a code object.
- Handle<Code> GenerateCode(Linkage* linkage);
+ // Run the code assembly pass.
+ void AssembleCode(Linkage* linkage);
+
+ // Run the code finalization pass.
+ Handle<Code> FinalizeCode();
bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
@@ -615,6 +627,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
return AbortOptimization(kGraphBuildingFailed);
}
+ // Make sure that we have generated the maximal number of deopt entries.
+ // This is in order to avoid triggering the generation of deopt entries later
+ // during code assembly.
+ Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate());
+
return SUCCEEDED;
}
@@ -624,7 +641,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
}
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
- Handle<Code> code = pipeline_.GenerateCode(linkage_);
+ pipeline_.AssembleCode(linkage_);
+ Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
return AbortOptimization(kCodeGenerationFailed);
@@ -663,6 +681,8 @@ class PipelineWasmCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
+ size_t AllocatedMemory() const override;
+
ZoneStats zone_stats_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
@@ -709,9 +729,14 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
return SUCCEEDED;
}
+size_t PipelineWasmCompilationJob::AllocatedMemory() const {
+ return pipeline_.data_->zone_stats()->GetCurrentAllocatedBytes();
+}
+
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
- pipeline_.GenerateCode(&linkage_);
+ pipeline_.AssembleCode(&linkage_);
+ pipeline_.FinalizeCode();
return SUCCEEDED;
}
@@ -765,12 +790,12 @@ struct GraphBuilderPhase {
BytecodeGraphBuilder graph_builder(
temp_zone, data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector()),
- data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+ data->info()->osr_ast_id(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), SourcePosition::kNotInlined, flags);
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
- temp_zone, data->info(), data->jsgraph(), 1.0f,
+ temp_zone, data->info(), data->jsgraph(), CallFrequency(1.0f),
data->loop_assignment(), data->source_positions());
succeeded = graph_builder.CreateGraph();
}
@@ -781,6 +806,30 @@ struct GraphBuilderPhase {
}
};
+namespace {
+
+Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
+ Context* current = closure->context();
+ size_t distance = 0;
+ while (!current->IsNativeContext()) {
+ if (current->IsModuleContext()) {
+ return Just(OuterContext(handle(current), distance));
+ }
+ current = current->previous();
+ distance++;
+ }
+ return Nothing<OuterContext>();
+}
+
+Maybe<OuterContext> ChooseSpecializationContext(CompilationInfo* info) {
+ if (info->is_function_context_specializing()) {
+ DCHECK(info->has_context());
+ return Just(OuterContext(handle(info->context()), 0));
+ }
+ return GetModuleContext(info->closure());
+}
+
+} // anonymous namespace
struct InliningPhase {
static const char* phase_name() { return "inlining"; }
@@ -797,9 +846,7 @@ struct InliningPhase {
data->info()->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
- data->info()->is_function_context_specializing()
- ? handle(data->info()->context())
- : MaybeHandle<Context>(),
+ ChooseSpecializationContext(data->info()),
data->info()->is_function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
@@ -1426,14 +1473,19 @@ struct JumpThreadingPhase {
}
};
+struct AssembleCodePhase {
+ static const char* phase_name() { return "assemble code"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ data->code_generator()->AssembleCode();
+ }
+};
-struct GenerateCodePhase {
- static const char* phase_name() { return "generate code"; }
+struct FinalizeCodePhase {
+ static const char* phase_name() { return "finalize code"; }
- void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- CodeGenerator generator(data->frame(), linkage, data->sequence(),
- data->info());
- data->set_code(generator.GenerateCode());
+ void Run(PipelineData* data, Zone* temp_zone) {
+ data->set_code(data->code_generator()->FinalizeCode());
}
};
@@ -1595,19 +1647,14 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Simplified lowering", true);
+ // From now on it is invalid to look at types on the nodes, because the types
+ // on the nodes might not make sense after representation selection due to the
+ // way we handle truncations; if we'd want to look at types afterwards we'd
+ // essentially need to re-type (large portions of) the graph.
+
+ // In order to catch bugs related to type access after this point, we now
+ // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
- // From now on it is invalid to look at types on the nodes, because:
- //
- // (a) The remaining passes (might) run concurrent to the main thread and
- // therefore must not access the Heap or the Isolate in an uncontrolled
- // way (as done by the type system), and
- // (b) the types on the nodes might not make sense after representation
- // selection due to the way we handle truncations; if we'd want to look
- // at types afterwards we'd essentially need to re-type (large portions
- // of) the graph.
- //
- // In order to catch bugs related to type access after this point we remove
- // the types from the nodes at this point (currently only in Debug builds).
Run<UntyperPhase>();
RunPrintAndVerify("Untyped", true);
#endif
@@ -1707,7 +1754,8 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
if (!pipeline.CreateGraph()) return Handle<Code>::null();
if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
- return pipeline.GenerateCode(&linkage);
+ pipeline.AssembleCode(&linkage);
+ return pipeline.FinalizeCode();
}
// static
@@ -1883,13 +1931,16 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
return true;
}
-Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
+void PipelineImpl::AssembleCode(Linkage* linkage) {
PipelineData* data = this->data_;
-
data->BeginPhaseKind("code generation");
+ data->InitializeCodeGenerator(linkage);
+ Run<AssembleCodePhase>();
+}
- // Generate final machine code.
- Run<GenerateCodePhase>(linkage);
+Handle<Code> PipelineImpl::FinalizeCode() {
+ PipelineData* data = this->data_;
+ Run<FinalizeCodePhase>();
Handle<Code> code = data->code();
if (data->profiler_data()) {
@@ -1937,7 +1988,8 @@ Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
// Generate the final machine code.
- return GenerateCode(&linkage);
+ AssembleCode(&linkage);
+ return FinalizeCode();
}
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 2967ad73ed..be10a67f24 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -592,11 +592,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
- __ frsp(kScratchDoubleReg, value); \
+ /* removed frsp as instruction-selector checked */ \
+ /* value to be kFloat32 */ \
if (mode == kMode_MRI) { \
- __ stfs(kScratchDoubleReg, operand); \
+ __ stfs(value, operand); \
} else { \
- __ stfsx(kScratchDoubleReg, operand); \
+ __ stfsx(value, operand); \
} \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
@@ -704,11 +705,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
__ frsp(kScratchDoubleReg, value); \
+ /* removed frsp as instruction-selector checked */ \
+ /* value to be kFloat32 */ \
if (mode == kMode_MRI) { \
- __ stfs(kScratchDoubleReg, operand); \
+ __ stfs(value, operand); \
} else { \
CleanUInt32(offset); \
- __ stfsx(kScratchDoubleReg, operand); \
+ __ stfsx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 449e710389..ea88e81a05 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -174,6 +174,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index a2cf562115..671aafe381 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -257,6 +257,26 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
}
+Node* RawMachineAssembler::CallCFunction6(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ MachineSignature::Builder builder(zone(), 1, 6);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
+ arg4, arg5);
+}
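A hypothetical call site for the new six-argument helper; the assembler instance m, the function node, and the argument nodes are placeholders rather than code from this patch.

    Node* const result = m.CallCFunction6(
        MachineType::Int32(),                             // return type
        MachineType::Pointer(), MachineType::Pointer(),   // arg0, arg1 types
        MachineType::Uint32(), MachineType::Uint32(),     // arg2, arg3 types
        MachineType::Int32(), MachineType::Int32(),       // arg4, arg5 types
        function, arg0, arg1, arg2, arg3, arg4, arg5);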
+
Node* RawMachineAssembler::CallCFunction8(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
@@ -278,6 +298,31 @@ Node* RawMachineAssembler::CallCFunction8(
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
+
+Node* RawMachineAssembler::CallCFunction9(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
+ MachineSignature::Builder builder(zone(), 1, 9);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ builder.AddParam(arg6_type);
+ builder.AddParam(arg7_type);
+ builder.AddParam(arg8_type);
+ Node* args[] = {function, arg0, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8};
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+ return AddNode(common()->Call(descriptor), arraysize(args), args);
+}
+
BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 19a0f3bfd4..a82f9e079a 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -84,8 +84,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
- Node* StackSlot(MachineRepresentation rep) {
- return AddNode(machine()->StackSlot(rep));
+ Node* StackSlot(MachineRepresentation rep, int alignment = 0) {
+ return AddNode(machine()->StackSlot(rep, alignment));
}
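A hypothetical use of the new optional parameter; that the value is a byte alignment forwarded to the instruction selector's AllocateSpillSlot call is an assumption based on the MIPS64 change above.

    // Request a 64-bit slot that must start on a 16-byte boundary (sketch only).
    Node* slot = m.StackSlot(MachineRepresentation::kWord64, 16);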
Node* Int64Constant(int64_t value) {
return AddNode(common()->Int64Constant(value));
@@ -773,6 +773,13 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
+ // Call to a C function with six arguments.
+ Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5);
// Call to a C function with eight arguments.
Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
@@ -781,6 +788,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
+ // Call to a C function with nine arguments.
+ Node* CallCFunction9(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, MachineType arg8_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+ Node* arg7, Node* arg8);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 0439c536de..f15df671cf 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -364,12 +364,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
op = simplified()->ChangeFloat64ToTaggedPointer();
} else if (output_rep == MachineRepresentation::kFloat32) {
- // float32 -> float64 -> tagged
- node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTaggedPointer();
+ if (output_type->Is(Type::Number())) {
+ // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
} else if (output_rep == MachineRepresentation::kFloat64) {
- // float64 -> tagged
- op = simplified()->ChangeFloat64ToTaggedPointer();
+ if (output_type->Is(Type::Number())) {
+ // float64 -> tagged
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
} else if (CanBeTaggedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type->Maybe(Type::SignedSmall())) {
@@ -452,11 +462,14 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
Type::Unsigned32())) { // float64 -> uint32 -> tagged
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
- } else {
+ } else if (output_type->Is(Type::Number())) {
op = simplified()->ChangeFloat64ToTagged(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
}
} else {
return TypeError(node, output_rep, output_type,
@@ -654,7 +667,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
+ ? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
@@ -686,7 +699,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedTaggedToInt32(
output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
+ ? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index af96f7333f..b4f3366d42 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -198,8 +198,8 @@ class UseInfo {
TypeCheckKind::kSignedSmall);
}
static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
- return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSigned32);
+ return UseInfo(MachineRepresentation::kWord32,
+ Truncation::Any(identify_zeros), TypeCheckKind::kSigned32);
}
static UseInfo CheckedNumberAsFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 228ec3c0d5..f4e8ea13d2 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -702,6 +702,15 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
ArchOpcode opcode = SelectLoadOpcode(node);
@@ -2050,11 +2059,18 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord32BinOp(selector, node, kS390_Sub32,
SubOperandMode, cont);
case IrOpcode::kInt32MulWithOverflow:
- cont->OverwriteAndNegateIfEqual(kNotEqual);
- return VisitWord32BinOp(
- selector, node, kS390_Mul32WithOverflow,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
- cont);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord32BinOp(
+ selector, node, kS390_Mul32,
+ OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
+ } else {
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return VisitWord32BinOp(
+ selector, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ cont);
+ }
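A portable model of the condition the fallback path computes; with MISC_INSTR_EXT2 the multiply is assumed to set the overflow condition directly, which is why kOverflow replaces the kNotEqual compare there.

    #include <cstdint>
    #include <limits>

    // Widen, multiply, and report overflow when the product does not fit in
    // int32_t -- the condition the kNotEqual compare detects after
    // kS390_Mul32WithOverflow.
    bool Int32MulOverflows(int32_t lhs, int32_t rhs) {
      const int64_t wide = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
      return wide < std::numeric_limits<int32_t>::min() ||
             wide > std::numeric_limits<int32_t>::max();
    }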
case IrOpcode::kInt32AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitWord32UnaryOp(selector, node, kS390_Abs32,
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index ea218671ad..3660553041 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -4,8 +4,8 @@
#include "src/compiler/schedule.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/ostreams.h"
namespace v8 {
@@ -96,6 +96,8 @@ BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
return b1;
}
+void BasicBlock::Print() { OFStream(stdout) << this; }
+
std::ostream& operator<<(std::ostream& os, const BasicBlock& block) {
os << "B" << block.id();
#if DEBUG
@@ -415,6 +417,21 @@ void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
merger->set_deferred(false);
block->predecessors().clear();
block->predecessors().push_back(merger);
+ MovePhis(block, merger);
+}
+
+void Schedule::MovePhis(BasicBlock* from, BasicBlock* to) {
+ for (size_t i = 0; i < from->NodeCount();) {
+ Node* node = from->NodeAt(i);
+ if (node->opcode() == IrOpcode::kPhi) {
+ to->AddNode(node);
+ from->RemoveNode(from->begin() + i);
+ DCHECK_EQ(nodeid_to_block_[node->id()], from);
+ nodeid_to_block_[node->id()] = to;
+ } else {
+ ++i;
+ }
+ }
}
void Schedule::PropagateDeferredMark() {
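The MovePhis helper added above follows a common remove-while-iterating pattern: walk the node list by index, splice matching nodes into the destination block, and keep the id-to-block map consistent. A stripped-down C++ sketch of that pattern, assuming simple standard containers rather than the V8 classes:

#include <cstdio>
#include <unordered_map>
#include <vector>

struct Node { int id; bool is_phi; };

// Move every phi from `from` to `to`, updating an id -> owner map along the
// way. The index only advances when nothing was erased, exactly as in
// Schedule::MovePhis above.
void MoveMatching(std::vector<Node*>& from, std::vector<Node*>& to,
                  std::unordered_map<int, std::vector<Node*>*>& owner) {
  for (size_t i = 0; i < from.size();) {
    Node* node = from[i];
    if (node->is_phi) {
      to.push_back(node);
      from.erase(from.begin() + i);  // next element slides into slot i
      owner[node->id] = &to;
    } else {
      ++i;
    }
  }
}

int main() {
  Node a{0, true}, b{1, false}, c{2, true};
  std::vector<Node*> from{&a, &b, &c}, to;
  std::unordered_map<int, std::vector<Node*>*> owner{
      {0, &from}, {1, &from}, {2, &from}};
  MoveMatching(from, to, owner);
  std::printf("from=%zu to=%zu\n", from.size(), to.size());  // from=1 to=2
}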
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index b5e696dc41..ed69958e8b 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -65,6 +65,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
AssemblerDebugInfo debug_info() const { return debug_info_; }
#endif // DEBUG
+ void Print();
+
// Predecessors.
BasicBlockVector& predecessors() { return predecessors_; }
const BasicBlockVector& predecessors() const { return predecessors_; }
@@ -95,6 +97,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
+ void RemoveNode(iterator it) { nodes_.erase(it); }
+
typedef NodeVector::const_iterator const_iterator;
const_iterator begin() const { return nodes_.begin(); }
const_iterator end() const { return nodes_.end(); }
@@ -274,6 +278,8 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
void EnsureSplitEdgeForm(BasicBlock* block);
// Ensure entry into a deferred block happens from a single hot block.
void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
+ // Move Phi operands to newly created merger blocks
+ // Move Phi nodes to the newly created merger block
+ void MovePhis(BasicBlock* from, BasicBlock* to);
// Copy deferred block markers down as far as possible
void PropagateDeferredMark();
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 8f967788db..6cf88d33cf 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -16,6 +16,16 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+static const int kNumLanes32 = 4;
+static const int kNumLanes16 = 8;
+static const int kNumLanes8 = 16;
+static const int32_t kMask16 = 0xffff;
+static const int32_t kMask8 = 0xff;
+static const int32_t kShift16 = 16;
+static const int32_t kShift8 = 24;
+} // anonymous
+
SimdScalarLowering::SimdScalarLowering(
JSGraph* jsgraph, Signature<MachineRepresentation>* signature)
: jsgraph_(jsgraph),
@@ -35,7 +45,7 @@ SimdScalarLowering::SimdScalarLowering(
void SimdScalarLowering::LowerGraph() {
stack_.push_back({graph()->end(), 0});
state_.Set(graph()->end(), State::kOnStack);
- replacements_[graph()->end()->id()].type = SimdType::kInt32;
+ replacements_[graph()->end()->id()].type = SimdType::kInt32x4;
while (!stack_.empty()) {
NodeState& top = stack_.back();
@@ -73,11 +83,14 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4SConvertF32x4) \
V(I32x4UConvertF32x4) \
V(I32x4Neg) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
V(I32x4MaxS) \
+ V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
V(S128And) \
@@ -96,7 +109,6 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max)
@@ -120,6 +132,74 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4GtU) \
V(I32x4GeU)
+#define FOREACH_INT16X8_OPCODE(V) \
+ V(I16x8Splat) \
+ V(I16x8ExtractLane) \
+ V(I16x8ReplaceLane) \
+ V(I16x8Neg) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8ShrU) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU)
+
+#define FOREACH_INT8X16_OPCODE(V) \
+ V(I8x16Splat) \
+ V(I8x16ExtractLane) \
+ V(I8x16ReplaceLane) \
+ V(I8x16Neg) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16ShrU) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU)
+
+#define FOREACH_INT16X8_TO_SIMD1X8OPCODE(V) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8LtU) \
+ V(I16x8LeU)
+
+#define FOREACH_INT8X16_TO_SIMD1X16OPCODE(V) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16LtU) \
+ V(I8x16LeU)
+
+#define FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(V) \
+ V(Float32x4, Float32) \
+ V(Int32x4, Int32) \
+ V(Int16x8, Int16) \
+ V(Int8x16, Int8)
+
+#define FOREACH_SIMD_TYPE_TO_MACHINE_REP(V) \
+ V(Float32x4, Float32) \
+ V(Int32x4, Word32) \
+ V(Int16x8, Word16) \
+ V(Int8x16, Word8)
+
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
@@ -127,11 +207,11 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case IrOpcode::kReturn:
case IrOpcode::kParameter:
case IrOpcode::kCall: {
- replacements_[node->id()].type = SimdType::kInt32;
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kFloat32;
+ replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
@@ -139,24 +219,52 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
+ FOREACH_INT16X8_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ }
+ FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kSimd1x8;
+ break;
+ }
+ FOREACH_INT8X16_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ }
+ FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kSimd1x16;
+ break;
+ }
default: {
switch (output->opcode()) {
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kF32x4SConvertI32x4:
case IrOpcode::kF32x4UConvertI32x4: {
- replacements_[node->id()].type = SimdType::kInt32;
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
- replacements_[node->id()].type = SimdType::kFloat32;
+ replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
case IrOpcode::kS32x4Select: {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
+ FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ }
+ FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ }
+ case IrOpcode::kS16x8Select: {
+ replacements_[node->id()].type = SimdType::kSimd1x8;
+ break;
+ }
default: {
replacements_[node->id()].type = replacements_[output->id()].type;
}
@@ -200,42 +308,66 @@ static int GetReturnCountAfterLowering(
return result;
}
-void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
- new_indices[0] = index;
- for (size_t i = 1; i < kMaxLanes; ++i) {
- new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(
- static_cast<int>(i) * kLaneWidth)));
+int SimdScalarLowering::NumLanes(SimdType type) {
+ int num_lanes = 0;
+ if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4 ||
+ type == SimdType::kSimd1x4) {
+ num_lanes = kNumLanes32;
+ } else if (type == SimdType::kInt16x8 || type == SimdType::kSimd1x8) {
+ num_lanes = kNumLanes16;
+ } else if (type == SimdType::kInt8x16 || type == SimdType::kSimd1x16) {
+ num_lanes = kNumLanes8;
+ } else {
+ UNREACHABLE();
+ }
+ return num_lanes;
+}
+
+constexpr int SimdScalarLowering::kLaneOffsets[];
+
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
+ SimdType type) {
+ int num_lanes = NumLanes(type);
+ int lane_width = kSimd128Size / num_lanes;
+ int laneIndex = kLaneOffsets[0] / lane_width;
+ new_indices[laneIndex] = index;
+ for (int i = 1; i < num_lanes; ++i) {
+ laneIndex = kLaneOffsets[i * lane_width] / lane_width;
+ new_indices[laneIndex] = graph()->NewNode(
+ machine()->Int32Add(), index,
+ graph()->NewNode(
+ common()->Int32Constant(static_cast<int>(i) * lane_width)));
}
}
void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op) {
+ const Operator* load_op, SimdType type) {
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* indices[kMaxLanes];
- GetIndexNodes(index, indices);
- Node* rep_nodes[kMaxLanes];
+ int num_lanes = NumLanes(type);
+ Node** indices = zone()->NewArray<Node*>(num_lanes);
+ GetIndexNodes(index, indices, type);
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
+ rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(rep_nodes[0], load_op);
if (node->InputCount() > 2) {
DCHECK(node->InputCount() > 3);
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
- rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
- control_input);
- rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
- control_input);
- rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
- control_input);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
+ control_input);
+ effect_input = rep_nodes[i];
+ }
rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
} else {
- for (size_t i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
}
}
- ReplaceNode(node, rep_nodes);
+ ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
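For context on the load lowering above: a kSimd128 load is split into num_lanes scalar loads at byte offsets index + i * lane_width, and kLaneOffsets (defined in the header hunk further down) reverses the lane order on big-endian targets. The following self-contained C++ sketch models only that index-to-lane mapping, with hypothetical names rather than the compiler IR; within-lane byte order is simply taken from the host.

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr int kSimd128Bytes = 16;

// Load eight 16-bit lanes from a 128-bit memory region, one scalar load per
// lane. On a big-endian target the i-th load fills lane (7 - i) so that lane
// numbering stays consistent with the little-endian layout.
void LoadLanes16(const uint8_t* base, int index, uint16_t out[8],
                 bool big_endian) {
  const int num_lanes = 8;
  const int lane_width = kSimd128Bytes / num_lanes;  // 2 bytes per lane
  for (int i = 0; i < num_lanes; ++i) {
    int lane = big_endian ? (num_lanes - 1 - i) : i;
    std::memcpy(&out[lane], base + index + i * lane_width, lane_width);
  }
}

int main() {
  uint8_t mem[kSimd128Bytes];
  for (int i = 0; i < kSimd128Bytes; ++i) mem[i] = static_cast<uint8_t>(i);
  uint16_t lanes[8];
  LoadLanes16(mem, 0, lanes, false);
  std::printf("lane0=0x%04x lane7=0x%04x\n", lanes[0], lanes[7]);
}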
@@ -247,36 +379,36 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* indices[kMaxLanes];
- GetIndexNodes(index, indices);
+ int num_lanes = NumLanes(rep_type);
+ Node** indices = zone()->NewArray<Node*>(num_lanes);
+ GetIndexNodes(index, indices, rep_type);
DCHECK(node->InputCount() > 2);
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
- Node* rep_nodes[kMaxLanes];
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
Node** rep_inputs = GetReplacementsWithType(value, rep_type);
rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+ rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(node, store_op);
if (node->InputCount() > 3) {
DCHECK(node->InputCount() > 4);
Node* effect_input = node->InputAt(3);
Node* control_input = node->InputAt(4);
- rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
- effect_input, control_input);
- rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
- rep_nodes[3], control_input);
- rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
- rep_nodes[2], control_input);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ rep_nodes[i] =
+ graph()->NewNode(store_op, base, indices[i], rep_inputs[i],
+ effect_input, control_input);
+ effect_input = rep_nodes[i];
+ }
rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
-
} else {
- for (size_t i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] =
graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
}
}
-
- ReplaceNode(node, rep_nodes);
+ ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
@@ -287,47 +419,146 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
if (invert_inputs) {
rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
} else {
rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
+ return graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), input,
+ jsgraph_->Int32Constant(shift)),
+ jsgraph_->Int32Constant(shift));
+}
+
+void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
+ SimdType input_rep_type,
+ const Operator* op) {
+ DCHECK(node->InputCount() == 2);
+ DCHECK(input_rep_type == SimdType::kInt16x8 ||
+ input_rep_type == SimdType::kInt8x16);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ int32_t shift_val =
+ (input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]),
+ shift_val);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+Node* SimdScalarLowering::Mask(Node* input, int32_t mask) {
+ return graph()->NewNode(machine()->Word32And(), input,
+ jsgraph_->Int32Constant(mask));
+}
+
+void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
+ SimdType input_rep_type,
+ const Operator* op,
+ bool is_signed) {
+ DCHECK(node->InputCount() == 2);
+ DCHECK(input_rep_type == SimdType::kInt16x8 ||
+ input_rep_type == SimdType::kInt8x16);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int32_t min = 0;
+ int32_t max = 0;
+ int32_t mask = 0;
+ int32_t shift_val = 0;
+ MachineRepresentation phi_rep;
+ if (input_rep_type == SimdType::kInt16x8) {
+ if (is_signed) {
+ min = std::numeric_limits<int16_t>::min();
+ max = std::numeric_limits<int16_t>::max();
+ } else {
+ min = std::numeric_limits<uint16_t>::min();
+ max = std::numeric_limits<uint16_t>::max();
+ }
+ mask = kMask16;
+ shift_val = kShift16;
+ phi_rep = MachineRepresentation::kWord16;
+ } else {
+ if (is_signed) {
+ min = std::numeric_limits<int8_t>::min();
+ max = std::numeric_limits<int8_t>::max();
+ } else {
+ min = std::numeric_limits<uint8_t>::min();
+ max = std::numeric_limits<uint8_t>::max();
+ }
+ mask = kMask8;
+ shift_val = kShift8;
+ phi_rep = MachineRepresentation::kWord8;
+ }
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* op_result = nullptr;
+ Node* left = is_signed ? rep_left[i] : Mask(rep_left[i], mask);
+ Node* right = is_signed ? rep_right[i] : Mask(rep_right[i], mask);
+ op_result = graph()->NewNode(op, left, right);
+ Diamond d_min(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(), op_result,
+ jsgraph_->Int32Constant(min)));
+ rep_node[i] = d_min.Phi(phi_rep, jsgraph_->Int32Constant(min), op_result);
+ Diamond d_max(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(),
+ jsgraph_->Int32Constant(max), rep_node[i]));
+ rep_node[i] = d_max.Phi(phi_rep, jsgraph_->Int32Constant(max), rep_node[i]);
+ rep_node[i] =
+ is_signed ? rep_node[i] : FixUpperBits(rep_node[i], shift_val);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
const Operator* op) {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = graph()->NewNode(op, rep[i]);
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
- bool is_max) {
+ bool is_max, SimdType type) {
DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
- Node** rep_right =
- GetReplacementsWithType(node->InputAt(1), SimdType::kInt32);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ MachineRepresentation rep = MachineRepresentation::kNone;
+ if (type == SimdType::kInt32x4) {
+ rep = MachineRepresentation::kWord32;
+ } else if (type == SimdType::kInt16x8) {
+ rep = MachineRepresentation::kWord16;
+ } else if (type == SimdType::kInt8x16) {
+ rep = MachineRepresentation::kWord8;
+ } else {
+ UNREACHABLE();
+ }
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
if (is_max) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_right[i], rep_left[i]);
+ rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
} else {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_left[i], rep_right[i]);
+ rep_node[i] = d.Phi(rep, rep_left[i], rep_right[i]);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
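The saturating lowering above keeps each 8- or 16-bit lane sign-extended in a 32-bit word: unsigned inputs are masked, the plain 32-bit op runs, the result is clamped to the lane's value range with two diamonds, and unsigned results are re-sign-extended via FixUpperBits. A scalar C++ sketch of the same sequence for 16-bit addition; the function names are illustrative, not part of the patch.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

// Signed saturating add on 16-bit lanes held in 32-bit words: both inputs
// already fit in 16 bits, so the 32-bit sum cannot overflow and only needs
// clamping to the int16 range.
int32_t SaturatingAddS16(int32_t a, int32_t b) {
  int32_t sum = a + b;
  sum = std::max(sum, static_cast<int32_t>(std::numeric_limits<int16_t>::min()));
  sum = std::min(sum, static_cast<int32_t>(std::numeric_limits<int16_t>::max()));
  return sum;
}

// Unsigned variant: mask the sign-extended inputs down to 16 bits, add,
// clamp to 0xffff, then sign-extend the low half again so the word has the
// canonical in-register form (mirrors Mask + FixUpperBits).
int32_t SaturatingAddU16(int32_t a, int32_t b) {
  int32_t sum = (a & 0xffff) + (b & 0xffff);
  sum = std::min(sum, 0xffff);
  return static_cast<int16_t>(sum);
}

int main() {
  std::printf("%d\n", SaturatingAddS16(30000, 10000));  // 32767
  std::printf("%d\n", SaturatingAddU16(-1, 1));         // clamps to 0xffff, i.e. -1
}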
@@ -362,14 +593,14 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
DCHECK(node->InputCount() == 1);
- Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32);
- Node* rep_node[kMaxLanes];
+ Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32x4);
+ Node* rep_node[kNumLanes32];
Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
@@ -390,21 +621,55 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
graph()->NewNode(machine()->TruncateFloat64ToUint32(), trunc);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
}
-void SimdScalarLowering::LowerShiftOp(Node* node, const Operator* op) {
- static int32_t shift_mask = 0x1f;
+void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(1, node->InputCount());
int32_t shift_amount = OpParameter<int32_t>(node);
- Node* shift_node =
- graph()->NewNode(common()->Int32Constant(shift_amount & shift_mask));
- Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep[i], shift_node);
+ Node* shift_node = graph()->NewNode(common()->Int32Constant(shift_amount));
+ Node** rep = GetReplacementsWithType(node->InputAt(0), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = rep[i];
+ switch (node->opcode()) {
+ case IrOpcode::kI8x16ShrU:
+ rep_node[i] = Mask(rep_node[i], kMask8);
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI16x8ShrU:
+ rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
+ case IrOpcode::kI32x4ShrU:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI32x4Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI16x8Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ rep_node[i] = FixUpperBits(rep_node[i], kShift16);
+ break;
+ case IrOpcode::kI8x16Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ rep_node[i] = FixUpperBits(rep_node[i], kShift8);
+ break;
+ case IrOpcode::kI32x4ShrS:
+ case IrOpcode::kI16x8ShrS:
+ case IrOpcode::kI8x16ShrS:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Sar(), rep_node[i], shift_node);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
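The shift lowering above has to respect the fact that narrow lanes live sign-extended in 32-bit words: a logical right shift masks the lane down to its width first, a left shift re-sign-extends the result afterwards (FixUpperBits), and an arithmetic right shift can use the word as-is. A scalar C++ sketch for 16-bit lanes, illustrative names only:

#include <cstdint>
#include <cstdio>

// Logical right shift: drop the sign-extension bits before shifting.
int32_t I16ShrU(int32_t lane, int shift) {
  uint32_t masked = static_cast<uint32_t>(lane) & 0xffff;
  return static_cast<int32_t>(masked >> shift);
}

// Left shift: shift, then sign-extend the low 16 bits (FixUpperBits).
int32_t I16Shl(int32_t lane, int shift) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(lane) << shift);
  return static_cast<int16_t>(shifted);
}

// Arithmetic right shift: the lane is already sign-extended, so the plain
// 32-bit arithmetic shift gives the right answer.
int32_t I16ShrS(int32_t lane, int shift) { return lane >> shift; }

int main() {
  std::printf("%d\n", I16ShrU(-2, 1));     // 32767: 0xfffe >> 1
  std::printf("%d\n", I16Shl(0x4000, 1));  // -32768: 0x8000 sign-extended
  std::printf("%d\n", I16ShrS(-2, 1));     // -1
}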
@@ -412,18 +677,20 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
rep_node[i] = d.Phi(MachineRepresentation::kWord32,
jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
+ int num_lanes = NumLanes(rep_type);
switch (node->opcode()) {
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering();
@@ -449,19 +716,19 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (old_index == new_index) {
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
- Node* new_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node* new_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
new_node[i] = nullptr;
}
new_node[0] = node;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kSimd128) {
- for (int i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < kNumLanes32; ++i) {
new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
graph()->start());
}
}
- ReplaceNode(node, new_node);
+ ReplaceNode(node, new_node, kNumLanes32);
}
}
break;
@@ -470,24 +737,36 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
- if (rep_type == SimdType::kInt32) {
- load_op = machine()->Load(MachineType::Int32());
- } else if (rep_type == SimdType::kFloat32) {
- load_op = machine()->Load(MachineType::Float32());
+#define LOAD_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ load_op = machine()->Load(MachineType::mType()); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE)
+ default:
+ UNREACHABLE();
}
- LowerLoadOp(rep, node, load_op);
+#undef LOAD_CASE
+ LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
UnalignedLoadRepresentationOf(node->op()).representation();
const Operator* load_op;
- if (rep_type == SimdType::kInt32) {
- load_op = machine()->UnalignedLoad(MachineType::Int32());
- } else if (rep_type == SimdType::kFloat32) {
- load_op = machine()->UnalignedLoad(MachineType::Float32());
+#define UNALIGNED_LOAD_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ load_op = machine()->UnalignedLoad(MachineType::mType()); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(UNALIGNED_LOAD_CASE)
+ default:
+ UNREACHABLE();
}
- LowerLoadOp(rep, node, load_op);
+#undef UNALIGNED_LOAD_CASE
+ LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kStore: {
@@ -496,24 +775,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
- if (rep_type == SimdType::kInt32) {
- store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, write_barrier_kind));
- } else {
- store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kFloat32, write_barrier_kind));
+#define STORE_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ store_op = machine()->Store(StoreRepresentation( \
+ MachineRepresentation::k##mType, write_barrier_kind)); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(STORE_CASE)
+ default:
+ UNREACHABLE();
}
+#undef STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
- if (rep_type == SimdType::kInt32) {
- store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
- } else {
- store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+#define UNALIGNED_STORE_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ store_op = machine()->UnalignedStore(MachineRepresentation::k##mType); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(UNALIGNED_STORE_CASE)
+ default:
+ UNREACHABLE();
}
+#undef UNALIGNED_STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
@@ -541,12 +831,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] =
graph()->NewNode(common()->Projection(i), node, graph()->start());
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
}
break;
}
@@ -559,7 +849,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node** rep_input =
GetReplacementsWithType(node->InputAt(i), rep_type);
- for (int j = 0; j < kMaxLanes; j++) {
+ for (int j = 0; j < num_lanes; j++) {
rep_node[j]->ReplaceInput(i, rep_input[j]);
}
}
@@ -580,42 +870,93 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
- case IrOpcode::kI32x4MaxS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), true);
+ case IrOpcode::kI16x8Add:
+ case IrOpcode::kI8x16Add: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
+ break;
+ }
+ case IrOpcode::kI16x8Sub:
+ case IrOpcode::kI8x16Sub: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Sub());
+ break;
+ }
+ case IrOpcode::kI16x8Mul:
+ case IrOpcode::kI8x16Mul: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
- case IrOpcode::kI32x4MinS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), false);
+ case IrOpcode::kI16x8AddSaturateS:
+ case IrOpcode::kI8x16AddSaturateS: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
break;
}
- case IrOpcode::kI32x4MaxU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), true);
+ case IrOpcode::kI16x8SubSaturateS:
+ case IrOpcode::kI8x16SubSaturateS: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
break;
}
- case IrOpcode::kI32x4MinU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), false);
+ case IrOpcode::kI16x8AddSaturateU:
+ case IrOpcode::kI8x16AddSaturateU: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
break;
}
- case IrOpcode::kI32x4Neg: {
+ case IrOpcode::kI16x8SubSaturateU:
+ case IrOpcode::kI8x16SubSaturateU: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
+ break;
+ }
+ case IrOpcode::kI32x4MaxS:
+ case IrOpcode::kI16x8MaxS:
+ case IrOpcode::kI8x16MaxS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), true, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MinS:
+ case IrOpcode::kI16x8MinS:
+ case IrOpcode::kI8x16MinS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), false, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MaxU:
+ case IrOpcode::kI16x8MaxU:
+ case IrOpcode::kI8x16MaxU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), true, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MinU:
+ case IrOpcode::kI16x8MinU:
+ case IrOpcode::kI8x16MinU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4Neg:
+ case IrOpcode::kI16x8Neg:
+ case IrOpcode::kI8x16Neg: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* rep_node[kMaxLanes];
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
Node* zero = graph()->NewNode(common()->Int32Constant(0));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]);
+ if (node->opcode() == IrOpcode::kI16x8Neg) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift16);
+ } else if (node->opcode() == IrOpcode::kI8x16Neg) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift8);
+ }
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kS128Not: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* rep_node[kMaxLanes];
+ Node* rep_node[kNumLanes32];
Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
break;
}
case IrOpcode::kI32x4SConvertF32x4: {
@@ -626,16 +967,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerConvertFromFloat(node, false);
break;
}
- case IrOpcode::kI32x4Shl: {
- LowerShiftOp(node, machine()->Word32Shl());
- break;
- }
- case IrOpcode::kI32x4ShrS: {
- LowerShiftOp(node, machine()->Word32Sar());
- break;
- }
- case IrOpcode::kI32x4ShrU: {
- LowerShiftOp(node, machine()->Word32Shr());
+ case IrOpcode::kI32x4Shl:
+ case IrOpcode::kI16x8Shl:
+ case IrOpcode::kI8x16Shl:
+ case IrOpcode::kI32x4ShrS:
+ case IrOpcode::kI16x8ShrS:
+ case IrOpcode::kI8x16ShrS:
+ case IrOpcode::kI32x4ShrU:
+ case IrOpcode::kI16x8ShrU:
+ case IrOpcode::kI8x16ShrU: {
+ LowerShiftOp(node, rep_type);
break;
}
#define F32X4_BINOP_CASE(name) \
@@ -646,7 +987,6 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_BINOP_CASE(Add)
F32X4_BINOP_CASE(Sub)
F32X4_BINOP_CASE(Mul)
- F32X4_BINOP_CASE(Div)
F32X4_BINOP_CASE(Min)
F32X4_BINOP_CASE(Max)
#undef F32X4_BINOP_CASE
@@ -657,51 +997,57 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
- F32X4_UNOP_CASE(Sqrt)
#undef F32x4_UNOP_CASE
case IrOpcode::kF32x4SConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32, machine()->RoundInt32ToFloat32());
+ LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundInt32ToFloat32());
break;
}
case IrOpcode::kF32x4UConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32, machine()->RoundUint32ToFloat32());
+ LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
case IrOpcode::kI32x4Splat:
- case IrOpcode::kF32x4Splat: {
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ case IrOpcode::kF32x4Splat:
+ case IrOpcode::kI16x8Splat:
+ case IrOpcode::kI8x16Splat: {
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
if (HasReplacement(0, node->InputAt(0))) {
rep_node[i] = GetReplacements(node->InputAt(0))[0];
} else {
rep_node[i] = node->InputAt(0);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ExtractLane:
- case IrOpcode::kF32x4ExtractLane: {
+ case IrOpcode::kF32x4ExtractLane:
+ case IrOpcode::kI16x8ExtractLane:
+ case IrOpcode::kI8x16ExtractLane: {
int32_t lane = OpParameter<int32_t>(node);
- Node* rep_node[kMaxLanes] = {
- GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
- nullptr, nullptr};
- ReplaceNode(node, rep_node);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
+ for (int i = 1; i < num_lanes; ++i) {
+ rep_node[i] = nullptr;
+ }
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ReplaceLane:
- case IrOpcode::kF32x4ReplaceLane: {
+ case IrOpcode::kF32x4ReplaceLane:
+ case IrOpcode::kI16x8ReplaceLane:
+ case IrOpcode::kI8x16ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
- DCHECK(lane >= 0 && lane <= 3);
Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
rep_node[lane] = repNode;
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
@@ -709,51 +1055,84 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
break; \
}
- COMPARISON_CASE(Float32, kF32x4Eq, Float32Equal, false)
- COMPARISON_CASE(Float32, kF32x4Lt, Float32LessThan, false)
- COMPARISON_CASE(Float32, kF32x4Le, Float32LessThanOrEqual, false)
- COMPARISON_CASE(Float32, kF32x4Gt, Float32LessThan, true)
- COMPARISON_CASE(Float32, kF32x4Ge, Float32LessThanOrEqual, true)
- COMPARISON_CASE(Int32, kI32x4Eq, Word32Equal, false)
- COMPARISON_CASE(Int32, kI32x4LtS, Int32LessThan, false)
- COMPARISON_CASE(Int32, kI32x4LeS, Int32LessThanOrEqual, false)
- COMPARISON_CASE(Int32, kI32x4GtS, Int32LessThan, true)
- COMPARISON_CASE(Int32, kI32x4GeS, Int32LessThanOrEqual, true)
- COMPARISON_CASE(Int32, kI32x4LtU, Uint32LessThan, false)
- COMPARISON_CASE(Int32, kI32x4LeU, Uint32LessThanOrEqual, false)
- COMPARISON_CASE(Int32, kI32x4GtU, Uint32LessThan, true)
- COMPARISON_CASE(Int32, kI32x4GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
+ COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
+ COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
+ COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
+ COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
+ COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32x4, kI32x4GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int32x4, kI32x4GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32x4, kI32x4LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int32x4, kI32x4LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32x4, kI32x4GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int32x4, kI32x4GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Int16x8, kI16x8Eq, Word32Equal, false)
+ COMPARISON_CASE(Int16x8, kI16x8LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int16x8, kI16x8LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int16x8, kI16x8GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int16x8, kI16x8GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int16x8, kI16x8LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int16x8, kI16x8LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int16x8, kI16x8GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int16x8, kI16x8GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Int8x16, kI8x16Eq, Word32Equal, false)
+ COMPARISON_CASE(Int8x16, kI8x16LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int8x16, kI8x16LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int8x16, kI8x16GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int8x16, kI8x16GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int8x16, kI8x16LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int8x16, kI8x16LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int8x16, kI8x16GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int8x16, kI8x16GeU, Uint32LessThanOrEqual, true)
#undef COMPARISON_CASE
case IrOpcode::kF32x4Ne: {
- LowerNotEqual(node, SimdType::kFloat32, machine()->Float32Equal());
+ LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
}
case IrOpcode::kI32x4Ne: {
- LowerNotEqual(node, SimdType::kInt32, machine()->Word32Equal());
+ LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
break;
}
- case IrOpcode::kS32x4Select: {
+ case IrOpcode::kI16x8Ne: {
+ LowerNotEqual(node, SimdType::kInt16x8, machine()->Word32Equal());
+ break;
+ }
+ case IrOpcode::kI8x16Ne: {
+ LowerNotEqual(node, SimdType::kInt8x16, machine()->Word32Equal());
+ break;
+ }
+ case IrOpcode::kS32x4Select:
+ case IrOpcode::kS16x8Select:
+ case IrOpcode::kS8x16Select: {
DCHECK(node->InputCount() == 3);
- DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4);
+ DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kSimd1x8 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kSimd1x16);
Node** boolean_input = GetReplacements(node->InputAt(0));
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
jsgraph_->Int32Constant(0)));
- if (rep_type == SimdType::kFloat32) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kFloat32, rep_right[1], rep_left[0]);
- } else if (rep_type == SimdType::kInt32) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_right[1], rep_left[0]);
- } else {
- UNREACHABLE();
+#define SELECT_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ rep_node[i] = \
+ d.Phi(MachineRepresentation::k##mType, rep_right[1], rep_left[0]); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(SELECT_CASE)
+ default:
+ UNREACHABLE();
}
+#undef SELECT_CASE
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
default: { DefaultLowering(node); }
@@ -770,7 +1149,7 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
}
if (HasReplacement(1, input)) {
something_changed = true;
- for (int j = 1; j < kMaxLanes; j++) {
+ for (int j = 1; j < ReplacementCount(input); ++j) {
node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
}
}
@@ -778,18 +1157,17 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
return something_changed;
}
-void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
- // if new_low == nullptr, then also new_high == nullptr.
- DCHECK(new_node[0] != nullptr ||
- (new_node[1] == nullptr && new_node[2] == nullptr &&
- new_node[3] == nullptr));
- for (int i = 0; i < kMaxLanes; ++i) {
- replacements_[old->id()].node[i] = new_node[i];
+void SimdScalarLowering::ReplaceNode(Node* old, Node** new_nodes, int count) {
+ replacements_[old->id()].node = zone()->NewArray<Node*>(count);
+ for (int i = 0; i < count; ++i) {
+ replacements_[old->id()].node[i] = new_nodes[i];
}
+ replacements_[old->id()].num_replacements = count;
}
bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
- return replacements_[node->id()].node[index] != nullptr;
+ return replacements_[node->id()].node != nullptr &&
+ replacements_[node->id()].node[index] != nullptr;
}
SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
@@ -802,30 +1180,61 @@ Node** SimdScalarLowering::GetReplacements(Node* node) {
return result;
}
+int SimdScalarLowering::ReplacementCount(Node* node) {
+ return replacements_[node->id()].num_replacements;
+}
+
+void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes32; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastInt32ToFloat32(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
+void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes32; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastFloat32ToInt32(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
return GetReplacements(node);
}
- Node** result = zone()->NewArray<Node*>(kMaxLanes);
- if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
- for (int i = 0; i < kMaxLanes; ++i) {
- if (replacements[i] != nullptr) {
- result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
- replacements[i]);
- } else {
- result[i] = nullptr;
- }
+ int num_lanes = NumLanes(type);
+ Node** result = zone()->NewArray<Node*>(num_lanes);
+ if (type == SimdType::kInt32x4) {
+ if (ReplacementType(node) == SimdType::kFloat32x4) {
+ Float32ToInt32(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt16x8) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
}
- } else if (ReplacementType(node) == SimdType::kFloat32 &&
- type == SimdType::kInt32) {
- for (int i = 0; i < kMaxLanes; ++i) {
- if (replacements[i] != nullptr) {
- result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
- replacements[i]);
- } else {
- result[i] = nullptr;
- }
+ } else if (type == SimdType::kFloat32x4) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToFloat32(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt16x8) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+ } else if (type == SimdType::kInt16x8) {
+ if (ReplacementType(node) == SimdType::kInt32x4 ||
+ ReplacementType(node) == SimdType::kFloat32x4) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
}
} else {
UNREACHABLE();
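For reference, the Float32ToInt32 and Int32ToFloat32 helpers introduced above are per-lane bit casts, not numeric conversions. A minimal C++ sketch, using memcpy in place of the machine-level Bitcast* operators:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the bits of a float as a 32-bit integer, and back again.
int32_t BitcastFloat32ToInt32(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

float BitcastInt32ToFloat32(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));
  return f;
}

int main() {
  std::printf("0x%08x\n",
              static_cast<uint32_t>(BitcastFloat32ToInt32(1.0f)));  // 0x3f800000
  std::printf("%g\n", BitcastInt32ToFloat32(0x40490fdb));           // ~3.14159
}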
@@ -842,31 +1251,34 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
// graph verifier.
int value_count = phi->op()->ValueInputCount();
SimdType type = ReplacementType(phi);
- Node** inputs_rep[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(type);
+ Node*** inputs_rep = zone()->NewArray<Node**>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
}
for (int i = 0; i < value_count; ++i) {
- for (int j = 0; j < kMaxLanes; j++) {
+ for (int j = 0; j < num_lanes; ++j) {
inputs_rep[j][i] = placeholder_;
}
}
- Node* rep_nodes[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
- if (type == SimdType::kInt32) {
- rep_nodes[i] = graph()->NewNode(
- common()->Phi(MachineRepresentation::kWord32, value_count),
- value_count + 1, inputs_rep[i], false);
- } else if (type == SimdType::kFloat32) {
- rep_nodes[i] = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, value_count),
- value_count + 1, inputs_rep[i], false);
- } else {
- UNREACHABLE();
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+#define PHI_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ rep_nodes[i] = graph()->NewNode( \
+ common()->Phi(MachineRepresentation::k##mType, value_count), \
+ value_count + 1, inputs_rep[i], false); \
+ break;
+
+ switch (type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(PHI_CASE)
+ default:
+ UNREACHABLE();
}
+#undef PHI_CASE
}
- ReplaceNode(phi, rep_nodes);
+ ReplaceNode(phi, rep_nodes, num_lanes);
}
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 70186fdf11..09c78dc983 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -28,14 +28,27 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kInt32, kFloat32, kSimd1x4 };
-
- static const int kMaxLanes = 4;
- static const int kLaneWidth = 16 / kMaxLanes;
+ enum class SimdType : uint8_t {
+ kFloat32x4,
+ kInt32x4,
+ kInt16x8,
+ kInt8x16,
+ kSimd1x4,
+ kSimd1x8,
+ kSimd1x16
+ };
+#if defined(V8_TARGET_BIG_ENDIAN)
+ static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+#else
+ static constexpr int kLaneOffsets[16] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+#endif
struct Replacement {
- Node* node[kMaxLanes];
- SimdType type; // represents what input type is expected
+ Node** node = nullptr;
+ SimdType type; // represents output type
+ int num_replacements = 0;
};
struct NodeState {
@@ -52,24 +65,35 @@ class SimdScalarLowering {
void LowerNode(Node* node);
bool DefaultLowering(Node* node);
- void ReplaceNode(Node* old, Node** new_nodes);
+ int NumLanes(SimdType type);
+ void ReplaceNode(Node* old, Node** new_nodes, int count);
bool HasReplacement(size_t index, Node* node);
Node** GetReplacements(Node* node);
+ int ReplacementCount(Node* node);
+ void Float32ToInt32(Node** replacements, Node** result);
+ void Int32ToFloat32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
- void GetIndexNodes(Node* index, Node** new_indices);
+ void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
void LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op);
+ const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
+ Node* FixUpperBits(Node* input, int32_t shift);
+ void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
+ const Operator* op);
+ Node* Mask(Node* input, int32_t mask);
+ void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
+ const Operator* op, bool is_signed);
void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
- void LowerIntMinMax(Node* node, const Operator* op, bool is_max);
+ void LowerIntMinMax(Node* node, const Operator* op, bool is_max,
+ SimdType type);
void LowerConvertFromFloat(Node* node, bool is_signed);
- void LowerShiftOp(Node* node, const Operator* op);
+ void LowerShiftOp(Node* node, SimdType type);
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index d0f952a9ec..1691f1618f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1073,6 +1073,51 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTagged);
}
+ void VisitFrameState(Node* node) {
+ DCHECK_EQ(5, node->op()->ValueInputCount());
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // Parameters.
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // Registers.
+
+ // Expression stack/accumulator.
+ if (node->InputAt(2)->opcode() == IrOpcode::kStateValues ||
+ node->InputAt(2)->opcode() == IrOpcode::kTypedStateValues) {
+ // TODO(turbofan): This should only be produced by AST graph builder.
+ // Remove once we switch to bytecode graph builder exclusively.
+ ProcessInput(node, 2, UseInfo::AnyTagged());
+ } else {
+ // Accumulator is a special flower - we need to remember its type in
+ // a singleton typed-state-values node (as if it was a singleton
+ // state-values node).
+ if (propagate()) {
+ EnqueueInput(node, 2, UseInfo::Any());
+ } else if (lower()) {
+ Zone* zone = jsgraph_->zone();
+ Node* accumulator = node->InputAt(2);
+ if (accumulator == jsgraph_->OptimizedOutConstant()) {
+ node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
+ } else {
+ ZoneVector<MachineType>* types =
+ new (zone->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(1, zone);
+ (*types)[0] = DeoptMachineTypeOf(
+ GetInfo(accumulator)->representation(), TypeOf(accumulator));
+
+ node->ReplaceInput(2, jsgraph_->graph()->NewNode(
+ jsgraph_->common()->TypedStateValues(
+ types, SparseInputMask::Dense()),
+ accumulator));
+ }
+ }
+ }
+
+ ProcessInput(node, 3, UseInfo::AnyTagged()); // Context.
+ ProcessInput(node, 4, UseInfo::AnyTagged()); // Closure.
+ ProcessInput(node, 5, UseInfo::AnyTagged()); // Outer frame state.
+ return SetOutput(node, MachineRepresentation::kTagged);
+ }
+
void VisitObjectState(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
@@ -1402,30 +1447,6 @@ class RepresentationSelector {
return;
}
- void VisitOsrGuard(Node* node) {
- VisitInputs(node);
-
- // Insert a dynamic check for the OSR value type if necessary.
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- // At this point, we should always have a type for the OsrValue.
- UNREACHABLE();
- break;
- case OsrGuardType::kSignedSmall:
- if (lower()) {
- NodeProperties::ChangeOp(node,
- simplified()->CheckedTaggedToTaggedSigned());
- }
- return SetOutput(node, MachineRepresentation::kTaggedSigned);
- case OsrGuardType::kAny: // Nothing to check.
- if (lower()) {
- DeferReplacement(node, node->InputAt(0));
- }
- return SetOutput(node, MachineRepresentation::kTagged);
- }
- UNREACHABLE();
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1531,11 +1552,14 @@ class RepresentationSelector {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
- } else {
- DCHECK(CanBeTaggedPointer(input_info->representation()));
+ } else if (CanBeTaggedPointer(input_info->representation())) {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ } else {
+ DCHECK_EQ(MachineRepresentation::kNone,
+ input_info->representation());
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else {
// No input representation requirement; adapt during lowering.
@@ -2700,11 +2724,7 @@ class RepresentationSelector {
switch (mode) {
case CheckFloat64HoleMode::kAllowReturnHole:
if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsWord32()) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if (truncation.IsUsedAsFloat64()) {
+ if (truncation.IsUsedAsFloat64()) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -2775,6 +2795,8 @@ class RepresentationSelector {
MachineRepresentation::kFloat64);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
+ case IrOpcode::kFrameState:
+ return VisitFrameState(node);
case IrOpcode::kStateValues:
return VisitStateValues(node);
case IrOpcode::kObjectState:
@@ -2783,16 +2805,19 @@ class RepresentationSelector {
// We just get rid of the sigma here. In principle, it should be
// possible to refine the truncation and representation based on
// the sigma's type.
- MachineRepresentation output =
+ MachineRepresentation representation =
GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
- VisitUnop(node, UseInfo(output, truncation), output);
+
+ // For now, we just handle specially the impossible case.
+ MachineRepresentation output = TypeOf(node)->IsInhabited()
+ ? representation
+ : MachineRepresentation::kNone;
+
+ VisitUnop(node, UseInfo(representation, truncation), output);
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
- case IrOpcode::kOsrGuard:
- return VisitOsrGuard(node);
-
case IrOpcode::kFinishRegion:
VisitInputs(node);
// Assume the output is tagged pointer.
@@ -2810,10 +2835,11 @@ class RepresentationSelector {
case IrOpcode::kIfException:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kDeoptimize:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
- case IrOpcode::kFrameState:
case IrOpcode::kCheckpoint:
case IrOpcode::kLoop:
case IrOpcode::kMerge:
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9fb0fc55bf..476f423749 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -8,6 +8,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 3750861bf0..ac53bfc72e 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -141,7 +141,8 @@ enum class CheckForMinusZeroMode : uint8_t {
size_t hash_value(CheckForMinusZeroMode);
-std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ CheckForMinusZeroMode);
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 94c54ac600..f92d507dfb 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -607,19 +607,6 @@ Type* Typer::Visitor::TypeParameter(Node* node) {
Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
-Type* Typer::Visitor::TypeOsrGuard(Node* node) {
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- return Type::None();
- case OsrGuardType::kSignedSmall:
- return Type::SignedSmall();
- case OsrGuardType::kAny:
- return Type::Any();
- }
- UNREACHABLE();
- return nullptr;
-}
-
Type* Typer::Visitor::TypeRetain(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1116,15 +1103,23 @@ Type* Typer::Visitor::TypeJSCreate(Node* node) { return Type::Object(); }
Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
- return Type::OtherObject();
+ switch (CreateArgumentsTypeOf(node->op())) {
+ case CreateArgumentsType::kRestParameter:
+ return Type::Array();
+ case CreateArgumentsType::kMappedArguments:
+ case CreateArgumentsType::kUnmappedArguments:
+ return Type::OtherObject();
+ }
+ UNREACHABLE();
+ return nullptr;
}
+Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
-Type* Typer::Visitor::TypeJSCreateArray(Node* node) {
+Type* Typer::Visitor::TypeJSCreateGeneratorObject(Node* node) {
return Type::OtherObject();
}
-
Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
return Type::Function();
}
@@ -1139,7 +1134,7 @@ Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
}
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
- return Type::OtherObject();
+ return Type::Array();
}
@@ -1340,6 +1335,10 @@ Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
// JS other operators.
+Type* Typer::Visitor::TypeJSConstructForwardVarargs(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
@@ -1516,6 +1515,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
// Object functions.
case kObjectAssign:
+ return Type::Receiver();
case kObjectCreate:
return Type::OtherObject();
case kObjectHasOwnProperty:
@@ -1527,7 +1527,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kRegExpCompile:
return Type::OtherObject();
case kRegExpExec:
- return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+ return Type::Union(Type::Array(), Type::Null(), t->zone());
case kRegExpTest:
return Type::Boolean();
case kRegExpToString:
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index e0de4ef97a..ef2d3a0ef6 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -208,6 +208,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
return kOtherCallable;
}
return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
case JS_VALUE_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
@@ -216,7 +218,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
- case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
@@ -297,6 +298,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
+ case CELL_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -311,24 +313,23 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
- case CALL_HANDLER_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
- case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
- case BREAK_POINT_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
- case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
- case CONSTANT_ELEMENTS_PAIR_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
+ case PADDING_TYPE_1:
+ case PADDING_TYPE_2:
+ case PADDING_TYPE_3:
+ case PADDING_TYPE_4:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index fe0df3300f..452ac7658e 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -126,6 +126,7 @@ namespace compiler {
V(Hole, 1u << 22) \
V(OtherInternal, 1u << 23) \
V(ExternalPointer, 1u << 24) \
+ V(Array, 1u << 25) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -166,12 +167,14 @@ namespace compiler {
V(Primitive, kSymbol | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
V(Proxy, kCallableProxy | kOtherProxy) \
+ V(ArrayOrOtherObject, kArray | kOtherObject) \
+ V(ArrayOrProxy, kArray | kProxy) \
V(DetectableCallable, kFunction | kBoundFunction | \
kOtherCallable | kCallableProxy) \
V(Callable, kDetectableCallable | kOtherUndetectable) \
- V(NonCallable, kOtherObject | kOtherProxy) \
+ V(NonCallable, kArray | kOtherObject | kOtherProxy) \
V(NonCallableOrNull, kNonCallable | kNull) \
- V(DetectableObject, kFunction | kBoundFunction | \
+ V(DetectableObject, kArray | kFunction | kBoundFunction | \
kOtherCallable | kOtherObject) \
V(DetectableReceiver, kDetectableObject | kProxy) \
V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
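The types.h hunk above has to touch NonCallable and DetectableObject because the new kArray bit is carved out of kOtherObject: any composite mask that is meant to keep covering every ordinary object must absorb the new leaf bit. A standalone sketch of that invariant, with made-up bit names rather than V8's actual BitsetType machinery:

#include <cstdint>
#include <iostream>

using Bitset = uint32_t;

// Toy leaf bits; kArray is the newly carved-out kind.
constexpr Bitset kFunction    = 1u << 0;
constexpr Bitset kOtherObject = 1u << 1;
constexpr Bitset kOtherProxy  = 1u << 2;
constexpr Bitset kArray       = 1u << 3;

// Composite masks must be widened exactly as the hunk does, or membership
// tests silently stop covering the new kind.
constexpr Bitset kNonCallableOld = kOtherObject | kOtherProxy;           // pre-change
constexpr Bitset kNonCallableNew = kArray | kOtherObject | kOtherProxy;  // post-change

// "value is a subtype of set" for pure bitset types.
constexpr bool Is(Bitset value, Bitset set) { return (value & ~set) == 0; }

int main() {
  std::cout << Is(kArray, kNonCallableOld) << '\n';  // 0: arrays fell out of the mask
  std::cout << Is(kArray, kNonCallableNew) << '\n';  // 1: widened mask covers them again
}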
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 3c79c67fff..a1310ed22f 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -401,23 +401,6 @@ void Verifier::Visitor::Check(Node* node) {
// Type is merged from other values in the graph and could be any.
CheckTypeIs(node, Type::Any());
break;
- case IrOpcode::kOsrGuard:
- // OSR values have a value and a control input.
- CHECK_EQ(1, value_count);
- CHECK_EQ(1, effect_count);
- CHECK_EQ(1, control_count);
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- CheckTypeIs(node, Type::None());
- break;
- case OsrGuardType::kSignedSmall:
- CheckTypeIs(node, Type::SignedSmall());
- break;
- case OsrGuardType::kAny:
- CheckTypeIs(node, Type::Any());
- break;
- }
- break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
int index = static_cast<int>(ProjectionIndexOf(node->op()));
@@ -509,12 +492,19 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(0, control_count);
CHECK_EQ(0, effect_count);
CHECK_EQ(6, input_count);
- for (int i = 0; i < 3; ++i) {
+ // Check that the parameters and registers are kStateValues or
+ // kTypedStateValues.
+ for (int i = 0; i < 2; ++i) {
CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
IrOpcode::kStateValues ||
NodeProperties::GetValueInput(node, i)->opcode() ==
IrOpcode::kTypedStateValues);
}
+ // The accumulator (InputAt(2)) cannot be kStateValues, but it can be
+ // kTypedStateValues (to signal the type). Once AST graph builder
+ // is removed, we should check this here. Until then, AST graph
+ // builder can generate expression stack as InputAt(2), which can
+ // still be kStateValues.
break;
}
case IrOpcode::kStateValues:
@@ -598,12 +588,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Object());
break;
case IrOpcode::kJSCreateArguments:
- // Type is OtherObject.
- CheckTypeIs(node, Type::OtherObject());
+ // Type is Array \/ OtherObject.
+ CheckTypeIs(node, Type::ArrayOrOtherObject());
break;
case IrOpcode::kJSCreateArray:
- // Type is OtherObject.
- CheckTypeIs(node, Type::OtherObject());
+ // Type is Array.
+ CheckTypeIs(node, Type::Array());
break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
@@ -618,6 +608,9 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateLiteralArray:
+ // Type is Array.
+ CheckTypeIs(node, Type::Array());
+ break;
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
@@ -707,6 +700,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
+ case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
@@ -746,6 +740,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckNotTyped(node);
break;
+ case IrOpcode::kJSCreateGeneratorObject:
+ CheckTypeIs(node, Type::OtherObject());
+ break;
+
case IrOpcode::kJSGeneratorRestoreContinuation:
CheckTypeIs(node, Type::SignedSmall());
break;
@@ -1459,6 +1457,7 @@ void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
for (Node* other : node->uses()) {
if (all.IsLive(other) && other != proj &&
other->opcode() == IrOpcode::kProjection &&
+ other->InputAt(0) == node &&
ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s has duplicate projections #%d and #%d",
@@ -1712,10 +1711,11 @@ void Verifier::VerifyNode(Node* node) {
CHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
node->InputCount());
// If this node has no effect or no control outputs,
- // we check that no its uses are effect or control inputs.
+ // we check that none of its uses are effect or control inputs.
bool check_no_control = node->op()->ControlOutputCount() == 0;
bool check_no_effect = node->op()->EffectOutputCount() == 0;
bool check_no_frame_state = node->opcode() != IrOpcode::kFrameState;
+ int effect_edges = 0;
if (check_no_effect || check_no_control) {
for (Edge edge : node->use_edges()) {
Node* const user = edge.from();
@@ -1724,6 +1724,7 @@ void Verifier::VerifyNode(Node* node) {
CHECK(!check_no_control);
} else if (NodeProperties::IsEffectEdge(edge)) {
CHECK(!check_no_effect);
+ effect_edges++;
} else if (NodeProperties::IsFrameStateEdge(edge)) {
CHECK(!check_no_frame_state);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b6b9e3ff05..56c8f6cbef 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -66,7 +66,8 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
- Node** effect_ptr, Node* control) {
+ Node* centry_stub_node, Node** effect_ptr,
+ Node* control) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
if (!trap_handler::UseTrapHandler()) {
@@ -83,7 +84,7 @@ Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- Node* inputs[] = {jsgraph->CEntryStubConstant(fun->result_size),
+ Node* inputs[] = {centry_stub_node,
jsgraph->ExternalConstant(
ExternalReference(f, jsgraph->isolate())), // ref
jsgraph->Int32Constant(fun->nargs), // arity
@@ -100,15 +101,16 @@ Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded context.
Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* context, Node** parameters,
- int parameter_count, Node** effect_ptr,
- Node** control) {
+ Node* centry_stub_node, Node* context,
+ Node** parameters, int parameter_count,
+ Node** effect_ptr, Node** control) {
// Setting and clearing the thread-in-wasm flag should not be done as a normal
// runtime call.
DCHECK_NE(f, Runtime::kSetThreadInWasm);
DCHECK_NE(f, Runtime::kClearThreadInWasm);
// We're leaving Wasm code, so clear the flag.
- *control = BuildModifyThreadInWasmFlag(false, jsgraph, effect_ptr, *control);
+ *control = BuildModifyThreadInWasmFlag(false, jsgraph, centry_stub_node,
+ effect_ptr, *control);
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
@@ -123,7 +125,7 @@ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
DCHECK_GE(kMaxParams, parameter_count);
Node* inputs[kMaxParams + 6];
int count = 0;
- inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
+ inputs[count++] = centry_stub_node;
for (int i = 0; i < parameter_count; i++) {
inputs[count++] = parameters[i];
}
@@ -139,27 +141,30 @@ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
*effect_ptr = node;
// Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control = BuildModifyThreadInWasmFlag(true, jsgraph, effect_ptr, *control);
+ *control = BuildModifyThreadInWasmFlag(true, jsgraph, centry_stub_node,
+ effect_ptr, *control);
return node;
}
Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
- Node** parameters, int parameter_count,
- Node** effect_ptr, Node** control) {
- return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
- parameters, parameter_count, effect_ptr,
- control);
+ Node* centry_stub_node, Node** parameters,
+ int parameter_count, Node** effect_ptr,
+ Node** control) {
+ return BuildCallToRuntimeWithContext(f, jsgraph, centry_stub_node,
+ jsgraph->NoContextConstant(), parameters,
+ parameter_count, effect_ptr, control);
}
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
- wasm::FunctionSig* sig,
+ Handle<Code> centry_stub, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
+ centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
module_(module_env),
signature_tables_(zone),
function_tables_(zone),
@@ -1066,6 +1071,7 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
size_t size_in_bytes) {
switch (size_in_bytes) {
case 4:
+ case 16:
return m->Word32ReverseBytes().IsSupported();
case 8:
return m->Word64ReverseBytes().IsSupported();
@@ -1102,6 +1108,9 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
// No need to change endianness for byte size, return original node
return node;
break;
+ case MachineRepresentation::kSimd128:
+ DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
+ break;
default:
UNREACHABLE();
break;
@@ -1124,6 +1133,27 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
case 8:
result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
break;
+ case 16: {
+ Node* byte_reversed_lanes[4];
+ for (int lane = 0; lane < 4; lane++) {
+ byte_reversed_lanes[lane] = graph()->NewNode(
+ m->Word32ReverseBytes().op(),
+ graph()->NewNode(jsgraph()->machine()->I32x4ExtractLane(lane),
+ value));
+ }
+
+ // This is making a copy of the value.
+ result =
+ graph()->NewNode(jsgraph()->machine()->S128And(), value, value);
+
+ for (int lane = 0; lane < 4; lane++) {
+ result =
+ graph()->NewNode(jsgraph()->machine()->I32x4ReplaceLane(3 - lane),
+ result, byte_reversed_lanes[lane]);
+ }
+
+ break;
+ }
default:
UNREACHABLE();
}
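As a standalone illustration of the byte-reversal strategy the new kSimd128 case emits above (extract each 32-bit lane, byte-swap it, and replace it into the mirrored lane 3 - lane), here is a plain C++ sketch; ReverseBytes32 and ReverseBytes128 are hypothetical stand-ins for Word32ReverseBytes and the I32x4ExtractLane/I32x4ReplaceLane node sequence.

#include <array>
#include <cstdint>
#include <cstring>
#include <iostream>

// Stand-in for the Word32ReverseBytes machine operator.
uint32_t ReverseBytes32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
         (v << 24);
}

// Reverse all 16 bytes of a 128-bit value by byte-swapping each 32-bit lane
// and writing it back into lane 3 - lane, mirroring the node sequence above.
std::array<uint8_t, 16> ReverseBytes128(const std::array<uint8_t, 16>& in) {
  std::array<uint8_t, 16> out{};
  for (int lane = 0; lane < 4; lane++) {
    uint32_t value;
    std::memcpy(&value, in.data() + 4 * lane, 4);           // I32x4ExtractLane
    uint32_t reversed = ReverseBytes32(value);               // Word32ReverseBytes
    std::memcpy(out.data() + 4 * (3 - lane), &reversed, 4);  // I32x4ReplaceLane
  }
  return out;
}

int main() {
  std::array<uint8_t, 16> v;
  for (int i = 0; i < 16; i++) v[i] = static_cast<uint8_t>(i);
  for (int b : ReverseBytes128(v)) std::cout << b << ' ';  // 15 14 ... 1 0
  std::cout << '\n';
}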
@@ -1696,9 +1726,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Node* parameters[] = {BuildChangeUint32ToSmi(input)};
Node* old_effect = *effect_;
- Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
- parameters, arraysize(parameters), effect_,
- &check_input_range.if_true);
+ Node* call = BuildCallToRuntime(
+ Runtime::kWasmGrowMemory, jsgraph(), centry_stub_node_, parameters,
+ arraysize(parameters), effect_, &check_input_range.if_true);
Node* result = BuildChangeSmiToInt32(call);
@@ -1728,17 +1758,18 @@ Node* WasmGraphBuilder::Throw(Node* input) {
graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
Node* parameters[] = {lower, upper}; // thrown value
- return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
- arraysize(parameters), effect_, control_);
+ return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), centry_stub_node_,
+ parameters, arraysize(parameters), effect_,
+ control_);
}
Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
CommonOperatorBuilder* common = jsgraph()->common();
Node* parameters[] = {input}; // caught value
- Node* value =
- BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
- parameters, arraysize(parameters), effect_, control_);
+ Node* value = BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue,
+ jsgraph(), centry_stub_node_, parameters,
+ arraysize(parameters), effect_, control_);
Node* is_smi;
Node* is_heap;
@@ -2219,7 +2250,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- auto map = const_cast<wasm::SignatureMap&>(
+ auto& map = const_cast<wasm::SignatureMap&>(
module_->module->function_tables[0].map);
Node* sig_match = graph()->NewNode(
machine->WordEqual(), load_sig,
@@ -2588,14 +2619,16 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
graph()->start());
// Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
+ *control_);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the context of the calling javascript function
// (passed as a parameter), such that the generated code is context
// independent.
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- context, nullptr, 0, effect_, control_);
+ centry_stub_node_, context, nullptr, 0,
+ effect_, control_);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2635,7 +2668,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
*effect_ = call;
// Clear the ThreadInWasmFlag
- BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
+ *control_);
Node* retval = call;
Node* jsval = ToJS(
@@ -2673,7 +2707,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* context =
jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- context, nullptr, 0, effect_, control_);
+ centry_stub_node_, context, nullptr, 0,
+ effect_, control_);
// We don't need to return a value here, as the runtime call will not return
// anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
@@ -2684,7 +2719,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* call = nullptr;
- BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
+ *control_);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
@@ -2749,7 +2785,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = call;
SetSourcePosition(call, 0);
- BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
+ *control_);
// Convert the return value back.
Node* val = sig->return_count() == 0
@@ -2834,8 +2871,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
jsgraph()->SmiConstant(function_index), // function index
arg_buffer, // argument buffer
};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
- arraysize(parameters), effect_, control_);
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), centry_stub_node_,
+ parameters, arraysize(parameters), effect_, control_);
// Read back the return value.
if (sig->return_count() == 0) {
@@ -2882,10 +2919,9 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
// CurrentMemoryPages will not be called from asm.js, hence we cannot be in
// lazy-compilation mode, hence the instance will be set.
DCHECK_EQ(wasm::kWasmOrigin, module_->module->get_origin());
- DCHECK_NOT_NULL(module_);
- DCHECK_NOT_NULL(module_->instance);
- Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), nullptr,
- 0, effect_, control_);
+ Node* call =
+ BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), centry_stub_node_,
+ nullptr, 0, effect_, control_);
Node* result = BuildChangeSmiToInt32(call);
return result;
}
@@ -3188,8 +3224,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->F32x4Abs(), inputs[0]);
case wasm::kExprF32x4Neg:
return graph()->NewNode(jsgraph()->machine()->F32x4Neg(), inputs[0]);
- case wasm::kExprF32x4Sqrt:
- return graph()->NewNode(jsgraph()->machine()->F32x4Sqrt(), inputs[0]);
case wasm::kExprF32x4RecipApprox:
return graph()->NewNode(jsgraph()->machine()->F32x4RecipApprox(),
inputs[0]);
@@ -3199,27 +3233,21 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprF32x4Add:
return graph()->NewNode(jsgraph()->machine()->F32x4Add(), inputs[0],
inputs[1]);
+ case wasm::kExprF32x4AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->F32x4AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprF32x4Sub:
return graph()->NewNode(jsgraph()->machine()->F32x4Sub(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Mul:
return graph()->NewNode(jsgraph()->machine()->F32x4Mul(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4Div:
- return graph()->NewNode(jsgraph()->machine()->F32x4Div(), inputs[0],
- inputs[1]);
case wasm::kExprF32x4Min:
return graph()->NewNode(jsgraph()->machine()->F32x4Min(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Max:
return graph()->NewNode(jsgraph()->machine()->F32x4Max(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4RecipRefine:
- return graph()->NewNode(jsgraph()->machine()->F32x4RecipRefine(),
- inputs[0], inputs[1]);
- case wasm::kExprF32x4RecipSqrtRefine:
- return graph()->NewNode(jsgraph()->machine()->F32x4RecipSqrtRefine(),
- inputs[0], inputs[1]);
case wasm::kExprF32x4Eq:
return graph()->NewNode(jsgraph()->machine()->F32x4Eq(), inputs[0],
inputs[1]);
@@ -3257,6 +3285,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->I32x4Add(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->I32x4AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Sub:
return graph()->NewNode(jsgraph()->machine()->I32x4Sub(), inputs[0],
inputs[1]);
@@ -3330,6 +3361,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI16x8AddSaturateS:
return graph()->NewNode(jsgraph()->machine()->I16x8AddSaturateS(),
inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->I16x8AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(jsgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
@@ -3605,21 +3639,22 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
}
}
-Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+Node* WasmGraphBuilder::SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
const NodeVector& inputs) {
has_simd_ = true;
- switch (opcode) {
- case wasm::kExprS32x4Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S32x4Swizzle(swizzle),
- inputs[0]);
- case wasm::kExprS16x8Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S16x8Swizzle(swizzle),
- inputs[0]);
- case wasm::kExprS8x16Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S8x16Swizzle(swizzle),
- inputs[0]);
+ switch (lanes) {
+ case 4:
+ return graph()->NewNode(jsgraph()->machine()->S32x4Shuffle(shuffle),
+ inputs[0], inputs[1]);
+ case 8:
+ return graph()->NewNode(jsgraph()->machine()->S16x8Shuffle(shuffle),
+ inputs[0], inputs[1]);
+ case 16:
+ return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
+ inputs[0], inputs[1]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ UNREACHABLE();
+ return nullptr;
}
}
@@ -3662,7 +3697,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
Node* effect = nullptr;
wasm::ModuleEnv module_env(module, nullptr);
- WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
+ WasmGraphBuilder builder(&module_env, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildJSToWasmWrapper(wasm_code, func->sig);
@@ -3741,7 +3777,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), sig,
source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
@@ -3792,16 +3829,17 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
const char* function_name = nullptr;
- int function_name_size = 0;
+ size_t function_name_size = 0;
if (!import_name.is_null()) {
Handle<String> handle = import_name.ToHandleChecked();
function_name = handle->ToCString().get();
- function_name_size = handle->length();
+ function_name_size = static_cast<size_t>(handle->length());
}
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
- {module_name->ToCString().get(), module_name->length()},
- {function_name, function_name_size});
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "wasm-to-js", index,
+ {module_name->ToCString().get(),
+ static_cast<size_t>(module_name->length())},
+ {function_name, function_name_size});
}
return code;
@@ -3822,7 +3860,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmInterpreterEntry(func_index, sig, instance);
@@ -3887,14 +3926,14 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
- func_body_.sig, source_position_table);
+ centry_stub_, func_body_.sig, source_position_table);
graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
- os << "Compilation failed: " << graph_construction_result_.error_msg
+ os << "Compilation failed: " << graph_construction_result_.error_msg()
<< std::endl;
}
return nullptr;
@@ -3924,6 +3963,9 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
if (!name.is_empty()) {
return name;
}
+#ifndef DEBUG
+ return {};
+#endif
constexpr int kBufferLength = 15;
EmbeddedVector<char, kBufferLength> name_vector;
@@ -3938,54 +3980,48 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function)
+ const wasm::WasmFunction* function,
+ bool is_sync)
: WasmCompilationUnit(
isolate, &module_env->module_env,
wasm::FunctionBody{
function->sig, module_env->wire_bytes.start(),
module_env->wire_bytes.start() + function->code_start_offset,
module_env->wire_bytes.start() + function->code_end_offset},
- module_env->wire_bytes.GetNameOrNull(function),
- function->func_index) {}
+ module_env->wire_bytes.GetNameOrNull(function), function->func_index,
+ is_sync) {}
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleEnv* module_env,
wasm::FunctionBody body,
- wasm::WasmName name, int index)
+ wasm::WasmName name, int index,
+ bool is_sync)
: isolate_(isolate),
module_env_(module_env),
func_body_(body),
func_name_(name),
- graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
- jsgraph_(new (graph_zone()) JSGraph(
- isolate, new (graph_zone()) Graph(graph_zone()),
- new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
- nullptr,
- new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()))),
- compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(GetDebugName(&compilation_zone_, name, index), isolate,
- &compilation_zone_, Code::ComputeFlags(Code::WASM_FUNCTION)),
- func_index_(index),
- protected_instructions_(&compilation_zone_) {}
-
-void WasmCompilationUnit::InitializeHandles() {
- // Create and cache this node in the main thread, which contains a handle to
- // the code object of the c-entry stub.
- jsgraph_->CEntryStubConstant(1);
- DCHECK(!handles_initialized_);
-#if DEBUG
- handles_initialized_ = true;
-#endif // DEBUG
-}
+ is_sync_(is_sync),
+ centry_stub_(CEntryStub(isolate, 1).GetCode()),
+ func_index_(index) {}
void WasmCompilationUnit::ExecuteCompilation() {
- DCHECK(handles_initialized_);
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // HistogramTimerScope wasm_compile_function_time_scope(
- // isolate_->counters()->wasm_compile_function_time());
+ if (is_sync_) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ HistogramTimerScope wasm_compile_function_time_scope(
+ isolate_->counters()->wasm_compile_function_time());
+    ExecuteCompilationInternal();
+    return;
+  }
+ ExecuteCompilationInternal();
+ // Record the memory cost this unit places on the system until
+ // it is finalized. That may be "0" in error cases.
+ if (job_) {
+ size_t cost = job_->AllocatedMemory();
+ set_memory_cost(cost);
+ }
+}
+
+void WasmCompilationUnit::ExecuteCompilationInternal() {
if (FLAG_trace_wasm_compiler) {
if (func_name_.start() != nullptr) {
PrintF("Compiling WASM function %d:'%.*s'\n\n", func_index(),
@@ -3998,7 +4034,14 @@ void WasmCompilationUnit::ExecuteCompilation() {
double decode_ms = 0;
size_t node_count = 0;
- std::unique_ptr<Zone> graph_zone(graph_zone_.release());
+ Zone graph_zone(isolate_->allocator(), ZONE_NAME);
+ jsgraph_ = new (&graph_zone) JSGraph(
+ isolate_, new (&graph_zone) Graph(&graph_zone),
+ new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
+ new (&graph_zone) MachineOperatorBuilder(
+ &graph_zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
if (graph_construction_result_.failed()) {
@@ -4012,22 +4055,31 @@ void WasmCompilationUnit::ExecuteCompilation() {
pipeline_timer.Start();
}
+ compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
+
// Run the compiler pipeline to generate machine code.
CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- &compilation_zone_, func_body_.sig);
+ compilation_zone_.get(), func_body_.sig);
if (jsgraph_->machine()->Is32()) {
- descriptor =
- module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ descriptor = module_env_->GetI32WasmCallDescriptor(compilation_zone_.get(),
+ descriptor);
}
+ info_.reset(new CompilationInfo(
+ GetDebugName(compilation_zone_.get(), func_name_, func_index_), isolate_,
+ compilation_zone_.get(), Code::ComputeFlags(Code::WASM_FUNCTION)));
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
+ compilation_zone_.get());
+
job_.reset(Pipeline::NewWasmCompilationJob(
- &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
- !module_env_->module->is_wasm()));
+ info_.get(), jsgraph_, descriptor, source_positions,
+ &protected_instructions, !module_env_->module->is_wasm()));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
- // ->AddSample(
- // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+ if (is_sync_)
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ isolate_->counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
+ static_cast<int>(jsgraph_->graph()->zone()->allocation_size()));
if (FLAG_trace_wasm_decode_time) {
double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
@@ -4037,6 +4089,8 @@ void WasmCompilationUnit::ExecuteCompilation() {
static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
node_count, pipeline_ms);
}
+ // The graph zone is about to get out of scope. Avoid invalid references.
+ jsgraph_ = nullptr;
}
Handle<Code> WasmCompilationUnit::FinishCompilation(
@@ -4064,7 +4118,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation(
if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
return Handle<Code>::null();
}
- Handle<Code> code = info_.code();
+ Handle<Code> code = info_->code();
DCHECK(!code.is_null());
if (isolate_->logger()->is_logging_code_events() ||
@@ -4089,7 +4143,6 @@ Handle<Code> WasmCompilationUnit::CompileWasmFunction(
wasm::ErrorThrower* thrower, Isolate* isolate,
wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function) {
WasmCompilationUnit unit(isolate, module_env, function);
- unit.InitializeHandles();
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
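The restructured ExecuteCompilation path above builds the JSGraph inside a function-local graph zone and resets jsgraph_ to nullptr just before that zone goes out of scope. A minimal standalone sketch of the same lifetime rule, assuming toy names (CompilationUnit, Graph, graph_) rather than V8's Zone machinery:

#include <iostream>

struct Graph {
  int node_count = 0;  // stand-in for the zone-allocated graph
};

class CompilationUnit {
 public:
  void ExecuteCompilation() {
    Graph local_graph;        // lives only for the duration of this call,
    graph_ = &local_graph;    // like the function-local graph zone above
    graph_->node_count = 42;  // build and compile the graph here
    // Anything that must survive until finalization is copied into members
    // with their own storage (compilation zone, info, job).
    graph_ = nullptr;  // the graph is about to go out of scope; avoid
                       // leaving a dangling reference behind.
  }
  bool HasLiveGraph() const { return graph_ != nullptr; }

 private:
  Graph* graph_ = nullptr;  // only valid while ExecuteCompilation runs
};

int main() {
  CompilationUnit unit;
  unit.ExecuteCompilation();
  std::cout << unit.HasLiveGraph() << '\n';  // 0
}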
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 128bfbde00..f356f624d7 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -48,13 +48,14 @@ namespace compiler {
class WasmCompilationUnit final {
public:
WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function);
+ const wasm::WasmFunction* function, bool is_sync = true);
WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
- wasm::FunctionBody body, wasm::WasmName name, int index);
+ wasm::FunctionBody body, wasm::WasmName name, int index,
+ bool is_sync = true);
- Zone* graph_zone() { return graph_zone_.get(); }
int func_index() const { return func_index_; }
+ void ReopenCentryStub() { centry_stub_ = handle(*centry_stub_, isolate_); }
void InitializeHandles();
void ExecuteCompilation();
Handle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
@@ -64,6 +65,9 @@ class WasmCompilationUnit final {
wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function);
+ void set_memory_cost(size_t memory_cost) { memory_cost_ = memory_cost; }
+ size_t memory_cost() const { return memory_cost_; }
+
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
@@ -71,21 +75,22 @@ class WasmCompilationUnit final {
wasm::ModuleEnv* module_env_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
- // The graph zone is deallocated at the end of ExecuteCompilation.
- std::unique_ptr<Zone> graph_zone_;
- JSGraph* jsgraph_;
- Zone compilation_zone_;
- CompilationInfo info_;
+ bool is_sync_;
+ // The graph zone is deallocated at the end of ExecuteCompilation by virtue of
+ // it being zone allocated.
+ JSGraph* jsgraph_ = nullptr;
+  // The compilation_zone_, info_, and job_ fields need to survive past
+  // ExecuteCompilation, until FinishCompilation (which happens on the main
+  // thread).
+ std::unique_ptr<Zone> compilation_zone_;
+ std::unique_ptr<CompilationInfo> info_;
std::unique_ptr<CompilationJob> job_;
+ Handle<Code> centry_stub_;
int func_index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_ = true;
-#if DEBUG
- bool handles_initialized_ = false;
-#endif // DEBUG
- ZoneVector<trap_handler::ProtectedInstructionData>
- protected_instructions_; // Instructions that are protected by the signal
- // handler.
+ size_t memory_cost_ = 0;
+ void ExecuteCompilationInternal();
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
@@ -115,7 +120,8 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
WasmGraphBuilder(
- wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
+ wasm::ModuleEnv* module_env, Zone* z, JSGraph* g,
+ Handle<Code> centry_stub_, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
@@ -252,7 +258,7 @@ class WasmGraphBuilder {
Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
const NodeVector& inputs);
- Node* SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+ Node* SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
const NodeVector& inputs);
bool has_simd() const { return has_simd_; }
@@ -265,6 +271,7 @@ class WasmGraphBuilder {
Zone* zone_;
JSGraph* jsgraph_;
+ Node* centry_stub_node_;
wasm::ModuleEnv* module_ = nullptr;
Node* mem_buffer_ = nullptr;
Node* mem_size_ = nullptr;
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 3215ec24f7..86c547f460 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -762,7 +762,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(Assembler* assembler,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -770,10 +770,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
+ assembler->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
+ assembler->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -2006,6 +2006,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movsd(operand, i.InputDoubleRegister(index));
}
break;
+ case kX64Movdqu: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset());
+ if (instr->HasOutput()) {
+ __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movdqu(operand, i.InputSimd128Register(index));
+ }
+ break;
+ }
case kX64BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
@@ -2187,6 +2200,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I32x4AddHoriz: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ __ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I32x4Sub: {
__ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2235,7 +2253,205 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, dst);
break;
}
- case kX64S32x4Select: {
+ case kX64I16x8Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movd(dst, i.InputRegister(0));
+ __ pshuflw(dst, dst, 0x0);
+ __ pshufhw(dst, dst, 0x0);
+ __ pshufd(dst, dst, 0x0);
+ break;
+ }
+ case kX64I16x8ExtractLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsxwl(dst, dst);
+ break;
+ }
+ case kX64I16x8ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I16x8Shl: {
+ __ psllw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8ShrS: {
+ __ psraw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8Add: {
+ __ paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8AddSaturateS: {
+ __ paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8AddHoriz: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ __ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Sub: {
+ __ psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8SubSaturateS: {
+ __ psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Mul: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MinS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MaxS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Eq: {
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Ne: {
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I16x8ShrU: {
+ __ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8AddSaturateU: {
+ __ paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8SubSaturateU: {
+ __ psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MinU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MaxU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Splat: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movd(dst, i.InputRegister(0));
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ pshufb(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16ExtractLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsxbl(dst, dst);
+ break;
+ }
+ case kX64I8x16ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I8x16Add: {
+ __ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16AddSaturateS: {
+ __ paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Sub: {
+ __ psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16SubSaturateS: {
+ __ psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MinS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MaxS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Eq: {
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Ne: {
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16AddSaturateU: {
+ __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16SubSaturateU: {
+ __ psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MinU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MaxU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128And: {
+ __ pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Or: {
+ __ por(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Xor: {
+ __ pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Not: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
__ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
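Two of the new x64 cases lean on classic SSE bit tricks: kX64S128Not builds NOT by pcmpeqd-ing a register against itself (all ones) and XOR-ing, and kX64S128Select is a bitwise select with the mask held in dst. The hunk only shows the start of the select sequence, so the (mask & a) | (~mask & b) reading below is the conventional formula, stated here as an assumption; the sketch uses 64-bit scalars instead of XMM registers.

#include <cassert>
#include <cstdint>

// Bitwise select: take bits of a where mask is 1, bits of b where mask is 0.
uint64_t Select(uint64_t mask, uint64_t a, uint64_t b) {
  return (mask & a) | (~mask & b);
}

// NOT built the way pcmpeqd/pxor build it: XOR against an all-ones value.
uint64_t Not(uint64_t v) {
  uint64_t all_ones = ~uint64_t{0};  // pcmpeqd dst,dst sets every bit of dst
  return v ^ all_ones;               // pxor dst,src flips every bit
}

int main() {
  assert(Select(0xFF00FF00FF00FF00ull, ~0ull, 0ull) == 0xFF00FF00FF00FF00ull);
  assert(Not(0x0123456789ABCDEFull) == ~0x0123456789ABCDEFull);
  return 0;
}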
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 0133f80d4b..959a7d2d03 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -132,6 +132,7 @@ namespace compiler {
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
+ V(X64Movdqu) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
@@ -149,6 +150,7 @@ namespace compiler {
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
+ V(X64I32x4AddHoriz) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
@@ -158,7 +160,46 @@ namespace compiler {
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
- V(X64S32x4Select) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLane) \
+ V(X64I16x8ReplaceLane) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSaturateS) \
+ V(X64I16x8AddHoriz) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSaturateS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8AddSaturateU) \
+ V(X64I16x8SubSaturateU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLane) \
+ V(X64I8x16ReplaceLane) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSaturateS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSaturateS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16AddSaturateU) \
+ V(X64I8x16SubSaturateU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Not) \
+ V(X64S128Select) \
V(X64S128Zero)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index b66d853aba..0f4c37f033 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
+ case kX64I32x4AddHoriz:
case kX64I32x4Sub:
case kX64I32x4Mul:
case kX64I32x4MinS:
@@ -138,8 +139,47 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
+ case kX64I16x8Splat:
+ case kX64I16x8ExtractLane:
+ case kX64I16x8ReplaceLane:
+ case kX64I16x8Shl:
+ case kX64I16x8ShrS:
+ case kX64I16x8Add:
+ case kX64I16x8AddSaturateS:
+ case kX64I16x8AddHoriz:
+ case kX64I16x8Sub:
+ case kX64I16x8SubSaturateS:
+ case kX64I16x8Mul:
+ case kX64I16x8MinS:
+ case kX64I16x8MaxS:
+ case kX64I16x8Eq:
+ case kX64I16x8Ne:
+ case kX64I16x8ShrU:
+ case kX64I16x8AddSaturateU:
+ case kX64I16x8SubSaturateU:
+ case kX64I16x8MinU:
+ case kX64I16x8MaxU:
+ case kX64I8x16Splat:
+ case kX64I8x16ExtractLane:
+ case kX64I8x16ReplaceLane:
+ case kX64I8x16Add:
+ case kX64I8x16AddSaturateS:
+ case kX64I8x16Sub:
+ case kX64I8x16SubSaturateS:
+ case kX64I8x16MinS:
+ case kX64I8x16MaxS:
+ case kX64I8x16Eq:
+ case kX64I8x16Ne:
+ case kX64I8x16AddSaturateU:
+ case kX64I8x16SubSaturateU:
+ case kX64I8x16MinU:
+ case kX64I8x16MaxU:
+ case kX64S128And:
+ case kX64S128Or:
+ case kX64S128Xor:
+ case kX64S128Not:
+ case kX64S128Select:
case kX64S128Zero:
- case kX64S32x4Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -181,6 +221,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movq:
case kX64Movsd:
case kX64Movss:
+ case kX64Movdqu:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 89dc956318..3f4e2b3b1c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -26,7 +26,8 @@ class X64OperandGenerator final : public OperandGenerator {
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
- return value == static_cast<int64_t>(static_cast<int32_t>(value));
+ return std::numeric_limits<int32_t>::min() < value &&
+ value <= std::numeric_limits<int32_t>::max();
}
case IrOpcode::kNumberConstant: {
const double value = OpParameter<double>(node);
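The CanBeImmediate change above swaps a round-trip cast for explicit bounds, and the two predicates are not equivalent: the strict '<' on the lower bound now rejects INT32_MIN. A standalone comparison (OldPredicate and NewPredicate are illustrative names, not V8 functions):

#include <cstdint>
#include <iostream>
#include <limits>

bool OldPredicate(int64_t value) {
  // Accepts every value that round-trips through int32_t, INT32_MIN included.
  return value == static_cast<int64_t>(static_cast<int32_t>(value));
}

bool NewPredicate(int64_t value) {
  // Same range except INT32_MIN, which the strict lower bound excludes.
  return std::numeric_limits<int32_t>::min() < value &&
         value <= std::numeric_limits<int32_t>::max();
}

int main() {
  int64_t min32 = std::numeric_limits<int32_t>::min();
  std::cout << OldPredicate(min32) << ' ' << NewPredicate(min32) << '\n';          // 1 0
  std::cout << OldPredicate(min32 - 1) << ' ' << NewPredicate(min32 - 1) << '\n';  // 0 0
  std::cout << OldPredicate(42) << ' ' << NewPredicate(42) << '\n';                // 1 1
}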
@@ -230,6 +231,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ opcode = kX64Movdqu;
+ break;
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
@@ -265,6 +268,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
return kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ return kX64Movdqu;
+ break;
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
@@ -278,6 +283,15 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
@@ -2438,7 +2452,15 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) V(I32x4)
+#define SIMD_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
#define SIMD_ZERO_OP_LIST(V) \
V(S128Zero) \
@@ -2446,13 +2468,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(S1x8Zero) \
V(S1x16Zero)
-#define SIMD_SHIFT_OPCODES(V) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU)
-
#define SIMD_BINOP_LIST(V) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2460,7 +2478,46 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4MinU) \
- V(I32x4MaxU)
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_UNOP_LIST(V) V(S128Not)
+
+#define SIMD_SHIFT_OPCODES(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU)
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -2510,6 +2567,15 @@ SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
+#define VISIT_SIMD_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
+#undef VISIT_SIMD_UNOP
+
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2519,12 +2585,15 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
-void InstructionSelector::VisitS32x4Select(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64S32x4Select, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)));
-}
+#define SIMD_VISIT_SELECT_OP(format) \
+ void InstructionSelector::VisitS##format##Select(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64S128Select, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
+#undef SIMD_VISIT_SELECT_OP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index c11ac287d0..b5594b8894 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -168,6 +168,14 @@ class X87OperandGenerator final : public OperandGenerator {
}
};
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
diff --git a/deps/v8/src/compiler/zone-stats.cc b/deps/v8/src/compiler/zone-stats.cc
index 8942df5555..626ad4072c 100644
--- a/deps/v8/src/compiler/zone-stats.cc
+++ b/deps/v8/src/compiler/zone-stats.cc
@@ -68,11 +68,11 @@ ZoneStats::~ZoneStats() {
DCHECK(stats_.empty());
}
-size_t ZoneStats::GetMaxAllocatedBytes() {
+size_t ZoneStats::GetMaxAllocatedBytes() const {
return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
}
-size_t ZoneStats::GetCurrentAllocatedBytes() {
+size_t ZoneStats::GetCurrentAllocatedBytes() const {
size_t total = 0;
for (Zone* zone : zones_) {
total += static_cast<size_t>(zone->allocation_size());
@@ -80,7 +80,7 @@ size_t ZoneStats::GetCurrentAllocatedBytes() {
return total;
}
-size_t ZoneStats::GetTotalAllocatedBytes() {
+size_t ZoneStats::GetTotalAllocatedBytes() const {
return total_deleted_bytes_ + GetCurrentAllocatedBytes();
}
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index 39adca3693..6e0cd5fe4e 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE ZoneStats final {
explicit ZoneStats(AccountingAllocator* allocator);
~ZoneStats();
- size_t GetMaxAllocatedBytes();
- size_t GetTotalAllocatedBytes();
- size_t GetCurrentAllocatedBytes();
+ size_t GetMaxAllocatedBytes() const;
+ size_t GetTotalAllocatedBytes() const;
+ size_t GetCurrentAllocatedBytes() const;
private:
Zone* NewEmptyZone(const char* zone_name);