author    Michaël Zasso <targos@protonmail.com>  2019-08-16 11:32:46 +0200
committer Michaël Zasso <targos@protonmail.com>  2019-08-19 09:25:23 +0200
commit    e31f0a7d25668d3c1531294d2ef44a9f3bde4ef4 (patch)
tree      6c6bed9804be9df6162b2483f0a56f371f66464d /deps/v8/src/compiler
parent    ec16fdae540adaf710b1a86c620170b2880088f0 (diff)
deps: update V8 to 7.7.299.4
PR-URL: https://github.com/nodejs/node/pull/28918
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 5
-rw-r--r--  deps/v8/src/compiler/STYLE | 29
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 28
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 11
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 42
-rw-r--r--  deps/v8/src/compiler/access-info.h | 35
-rw-r--r--  deps/v8/src/compiler/add-type-assertions-reducer.cc | 51
-rw-r--r--  deps/v8/src/compiler/add-type-assertions-reducer.h | 45
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 68
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 73
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 80
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 64
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 3
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 3
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 2
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector-impl.h | 12
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 105
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 22
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc | 5
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/jump-threading.h | 13
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.cc | 24
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 70
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 70
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 3
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 19
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 88
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 1
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 121
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 36
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 63
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 33
-rw-r--r--  deps/v8/src/compiler/backend/unwinding-info-writer.h | 1
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 352
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 30
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 30
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 122
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.cc | 94
-rw-r--r--  deps/v8/src/compiler/bytecode-analysis.h | 45
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 324
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 11
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 32
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 123
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 8
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 12
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 1
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 56
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.h | 15
-rw-r--r--  deps/v8/src/compiler/compilation-dependency.h | 32
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.cc | 7
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.h | 7
-rw-r--r--  deps/v8/src/compiler/csa-load-elimination.cc | 336
-rw-r--r--  deps/v8/src/compiler/csa-load-elimination.h | 118
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.cc | 37
-rw-r--r--  deps/v8/src/compiler/decompression-elimination.h | 5
-rw-r--r--  deps/v8/src/compiler/diamond.h | 4
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 320
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 54
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 11
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 9
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 38
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 12
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 8
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 906
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 37
-rw-r--r--  deps/v8/src/compiler/int64-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 1265
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 17
-rw-r--r--  deps/v8/src/compiler/js-context-specialization.cc | 20
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 8
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 52
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 909
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 831
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 66
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 2
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 17
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 3
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 320
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.h | 35
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 14
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 58
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.h | 3
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 18
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 8
-rw-r--r--  deps/v8/src/compiler/linkage.h | 2
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 28
-rw-r--r--  deps/v8/src/compiler/load-elimination.h | 2
-rw-r--r--  deps/v8/src/compiler/loop-analysis.cc | 20
-rw-r--r--  deps/v8/src/compiler/loop-analysis.h | 6
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 13
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 3
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 55
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 42
-rw-r--r--  deps/v8/src/compiler/map-inference.cc | 25
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 45
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.h | 6
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 3
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 3
-rw-r--r--  deps/v8/src/compiler/node.cc | 8
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 53
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 31
-rw-r--r--  deps/v8/src/compiler/operation-typer.h | 6
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 307
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 15
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 13
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 4
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 5
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 3
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 192
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 51
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 38
-rw-r--r--  deps/v8/src/compiler/scheduler.h | 10
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 1402
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.h | 329
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 29
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 9
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 175
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 8
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 112
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 29
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc | 8
-rw-r--r--  deps/v8/src/compiler/state-values-utils.h | 4
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 16
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.h | 6
-rw-r--r--  deps/v8/src/compiler/typer.cc | 30
-rw-r--r--  deps/v8/src/compiler/typer.h | 7
-rw-r--r--  deps/v8/src/compiler/types.cc | 14
-rw-r--r--  deps/v8/src/compiler/types.h | 3
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 46
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 712
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 51
148 files changed, 8437 insertions, 3689 deletions
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 39beced3f3..50e2af7129 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -1,5 +1,3 @@
-set noparent
-
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
@@ -19,6 +17,7 @@ per-file wasm-*=gdeepti@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
-per-file simd-scalar-lowering.*=aseemgarg@chromium.org
+per-file simd-scalar-lowering.*=bbudge@chromium.org
+per-file simd-scalar-lowering.*=gdeepti@chromium.org
# COMPONENT: Blink>JavaScript>Compiler
diff --git a/deps/v8/src/compiler/STYLE b/deps/v8/src/compiler/STYLE
deleted file mode 100644
index ae41e3f989..0000000000
--- a/deps/v8/src/compiler/STYLE
+++ /dev/null
@@ -1,29 +0,0 @@
-Compiler Coding Style
-=====================
-
-Coding style for the TurboFan compiler generally follows the Google C++ Style
-Guide and the Chromium Coding Style. The notes below are usually just extensions
-beyond what the Google style guide already says. If this document doesn't
-mention a rule, follow the Google C++ style.
-
-
-TODOs
------
-We use the following convention for putting TODOs into the code:
-
- * A TODO(turbofan) implies a performance improvement opportunity.
- * A TODO(name) implies an incomplete implementation.
-
-
-Use of C++11 auto keyword
--------------------------
-Use auto to avoid type names that are just clutter. Continue to use manifest
-type declarations when it helps readability, and never use auto for anything
-but local variables, in particular auto should only be used where it is obvious
-from context what the type is:
-
- for (auto block : x->blocks()) // clearly a Block of some kind
- for (auto instr : x->instructions()) // clearly an Instruction of some kind
-
- for (auto b : x->predecessors()) // less clear, better to make it explicit
- for (BasicBlock* b : x->predecessors()) // now clear
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 726a81a465..a369de4885 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -14,9 +14,9 @@
#include "src/objects/heap-number.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-generator.h"
-#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
+#include "src/objects/source-text-module.h"
namespace v8 {
namespace internal {
@@ -72,6 +72,26 @@ FieldAccess AccessBuilder::ForBigIntBitfield() {
}
// static
+FieldAccess AccessBuilder::ForBigIntOptionalPadding() {
+ DCHECK_EQ(FIELD_SIZE(BigInt::kOptionalPaddingOffset), 4);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kOptionalPaddingOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kInt32, MachineType::Uint32(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForBigIntLeastSignificantDigit64() {
+ DCHECK_EQ(BigInt::SizeFor(1) - BigInt::SizeFor(0), 8);
+ FieldAccess access = {
+ kTaggedBase, BigInt::kDigitsOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get()->kBigUint64, MachineType::Uint64(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {
kTaggedBase, JSObject::kPropertiesOrHashOffset,
@@ -626,7 +646,7 @@ FieldAccess AccessBuilder::ForMapPrototype() {
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularExportsOffset,
+ kTaggedBase, SourceTextModule::kRegularExportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -636,7 +656,7 @@ FieldAccess AccessBuilder::ForModuleRegularExports() {
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
FieldAccess access = {
- kTaggedBase, Module::kRegularImportsOffset,
+ kTaggedBase, SourceTextModule::kRegularImportsOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TypeCompressedTaggedPointer(),
kPointerWriteBarrier};
@@ -847,7 +867,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
// static
FieldAccess AccessBuilder::ForValue() {
FieldAccess access = {
- kTaggedBase, JSValue::kValueOffset,
+ kTaggedBase, JSPrimitiveWrapper::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::NonInternal(), MachineType::TypeCompressedTagged(),
kFullWriteBarrier};
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e38c487b1a..e3a17fe257 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -42,6 +42,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to BigInt's bit field.
static FieldAccess ForBigIntBitfield();
+ // Provides access to BigInt's 32 bit padding that is placed after the
+ // bitfield on 64 bit architectures without pointer compression. Do not use
+ // this on 32 bit architectures.
+ static FieldAccess ForBigIntOptionalPadding();
+
+ // Provides access to BigInt's least significant digit on 64 bit
+ // architectures. Do not use this on 32 bit architectures.
+ static FieldAccess ForBigIntLeastSignificantDigit64();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
@@ -263,7 +272,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSStringIterator::index() field.
static FieldAccess ForJSStringIteratorIndex();
- // Provides access to JSValue::value() field.
+ // Provides access to JSPrimitiveWrapper::value() field.
static FieldAccess ForValue();
// Provides access to Cell::value() field.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 713484f734..6fc9e8214e 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,6 +8,7 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
#include "src/logging/counters.h"
@@ -78,7 +79,7 @@ PropertyAccessInfo PropertyAccessInfo::NotFound(Zone* zone,
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -90,7 +91,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies,
+ ZoneVector<CompilationDependency const*>&& dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -156,8 +157,7 @@ PropertyAccessInfo::PropertyAccessInfo(
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map,
ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies)
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies)
: kind_(kind),
receiver_maps_(receiver_maps),
unrecorded_dependencies_(std::move(unrecorded_dependencies)),
@@ -258,11 +258,6 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
}
}
-Handle<Cell> PropertyAccessInfo::export_cell() const {
- DCHECK_EQ(kModuleExport, kind_);
- return Handle<Cell>::cast(constant_);
-}
-
AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
CompilationDependencies* dependencies,
Zone* zone)
@@ -336,11 +331,10 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef map_ref(broker(), map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
+ map_ref.SerializeOwnDescriptor(descriptor);
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
@@ -360,19 +354,23 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
}
- map_ref.SerializeOwnDescriptor(descriptor);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
descriptor));
if (descriptors_field_type->IsClass()) {
- unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
// Remember the field map, and try to infer a useful type.
Handle<Map> map(descriptors_field_type->AsClass(), isolate());
field_type = Type::For(MapRef(broker(), map));
field_map = MaybeHandle<Map>(map);
}
+ } else {
+ CHECK(details_representation.IsTagged());
}
+ // TODO(turbofan): We may want to do this only depending on the use
+ // of the access info.
+ unrecorded_dependencies.push_back(
+ dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
+
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
constness = PropertyConstness::kConst;
@@ -445,9 +443,6 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
DCHECK_IMPLIES(lookup == CallOptimization::kHolderIsReceiver,
holder.is_null());
DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
- return PropertyAccessInfo::Invalid(zone());
- }
}
if (access_mode == AccessMode::kLoad) {
Handle<Name> cached_property_name;
@@ -569,7 +564,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
if (map_prototype->map().is_deprecated()) {
// Try to migrate the prototype object so we don't embed the deprecated
// map into the optimized code.
- JSObject::TryMigrateInstance(map_prototype);
+ JSObject::TryMigrateInstance(isolate(), map_prototype);
}
map = handle(map_prototype->map(), isolate());
holder = map_prototype;
@@ -611,8 +606,7 @@ void AccessInfoFactory::ComputePropertyAccessInfos(
void PropertyAccessInfo::RecordDependencies(
CompilationDependencies* dependencies) {
- for (CompilationDependencies::Dependency const* d :
- unrecorded_dependencies_) {
+ for (CompilationDependency const* d : unrecorded_dependencies_) {
dependencies->RecordDependency(d);
}
unrecorded_dependencies_.clear();
@@ -648,6 +642,8 @@ void AccessInfoFactory::MergePropertyAccessInfos(
CHECK(!result->empty());
}
+Isolate* AccessInfoFactory::isolate() const { return broker()->isolate(); }
+
namespace {
Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
@@ -760,8 +756,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
MapRef transition_map_ref(broker(), transition_map);
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies(zone());
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
transition_map_ref.SerializeOwnDescriptor(number);
@@ -796,6 +791,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
unrecorded_dependencies.push_back(
dependencies()->TransitionDependencyOffTheRecord(
MapRef(broker(), transition_map)));
+ transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore.
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 3499069fc4..4c7c3611df 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -8,7 +8,6 @@
#include <iosfwd>
#include "src/codegen/machine-type.h"
-#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/types.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/field-index.h"
@@ -25,8 +24,10 @@ class Factory;
namespace compiler {
// Forward declarations.
+class CompilationDependencies;
+class CompilationDependency;
class ElementAccessFeedback;
-class Type;
+class JSHeapBroker;
class TypeCache;
std::ostream& operator<<(std::ostream&, AccessMode);
@@ -74,16 +75,14 @@ class PropertyAccessInfo final {
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
static PropertyAccessInfo DataConstant(
Zone* zone, Handle<Map> receiver_map,
- ZoneVector<CompilationDependencies::Dependency const*>&&
- unrecorded_dependencies,
+ ZoneVector<CompilationDependency const*>&& unrecorded_dependencies,
FieldIndex field_index, Representation field_representation,
Type field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
@@ -113,9 +112,9 @@ class PropertyAccessInfo final {
Kind kind() const { return kind_; }
MaybeHandle<JSObject> holder() const {
- // This CHECK tries to protect against using the access info without
- // recording its dependencies first.
- CHECK(unrecorded_dependencies_.empty());
+ // TODO(neis): There was a CHECK here that tries to protect against
+ // using the access info without recording its dependencies first.
+ // Find a more suitable place for it.
return holder_;
}
MaybeHandle<Map> transition_map() const { return transition_map_; }
@@ -127,7 +126,6 @@ class PropertyAccessInfo final {
ZoneVector<Handle<Map>> const& receiver_maps() const {
return receiver_maps_;
}
- Handle<Cell> export_cell() const;
private:
explicit PropertyAccessInfo(Zone* zone);
@@ -136,17 +134,16 @@ class PropertyAccessInfo final {
PropertyAccessInfo(Zone* zone, Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
ZoneVector<Handle<Map>>&& receiver_maps);
- PropertyAccessInfo(
- Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
- FieldIndex field_index, Representation field_representation,
- Type field_type, MaybeHandle<Map> field_map,
- ZoneVector<Handle<Map>>&& receiver_maps,
- ZoneVector<CompilationDependencies::Dependency const*>&& dependencies);
+ PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
+ MaybeHandle<Map> transition_map, FieldIndex field_index,
+ Representation field_representation, Type field_type,
+ MaybeHandle<Map> field_map,
+ ZoneVector<Handle<Map>>&& receiver_maps,
+ ZoneVector<CompilationDependency const*>&& dependencies);
Kind kind_;
ZoneVector<Handle<Map>> receiver_maps_;
- ZoneVector<CompilationDependencies::Dependency const*>
- unrecorded_dependencies_;
+ ZoneVector<CompilationDependency const*> unrecorded_dependencies_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
@@ -215,7 +212,7 @@ class AccessInfoFactory final {
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
- Isolate* isolate() const { return broker()->isolate(); }
+ Isolate* isolate() const;
Zone* zone() const { return zone_; }
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.cc b/deps/v8/src/compiler/add-type-assertions-reducer.cc
new file mode 100644
index 0000000000..59d2fe6820
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.cc
@@ -0,0 +1,51 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/add-type-assertions-reducer.h"
+
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AddTypeAssertionsReducer::AddTypeAssertionsReducer(Editor* editor,
+ JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ visited_(jsgraph->graph()->NodeCount(), zone) {}
+
+AddTypeAssertionsReducer::~AddTypeAssertionsReducer() = default;
+
+Reduction AddTypeAssertionsReducer::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kAssertType ||
+ node->opcode() == IrOpcode::kPhi || !NodeProperties::IsTyped(node) ||
+ visited_.Get(node)) {
+ return NoChange();
+ }
+ visited_.Set(node, true);
+
+ Type type = NodeProperties::GetType(node);
+ if (!type.IsRange()) {
+ return NoChange();
+ }
+
+ Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
+ NodeProperties::SetType(assertion, type);
+
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ DCHECK(!user->IsDead());
+ if (NodeProperties::IsValueEdge(edge) && user != assertion) {
+ edge.UpdateTo(assertion);
+ Revisit(user);
+ }
+ }
+
+ return NoChange();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/add-type-assertions-reducer.h b/deps/v8/src/compiler/add-type-assertions-reducer.h
new file mode 100644
index 0000000000..36add040e1
--- /dev/null
+++ b/deps/v8/src/compiler/add-type-assertions-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+#define V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
+
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE AddTypeAssertionsReducer final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ AddTypeAssertionsReducer(Editor* editor, JSGraph* jsgraph, Zone* zone);
+ ~AddTypeAssertionsReducer() final;
+
+ const char* reducer_name() const override {
+ return "AddTypeAssertionsReducer";
+ }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ JSGraph* const jsgraph_;
+ NodeAuxData<bool> visited_;
+
+ Graph* graph() { return jsgraph_->graph(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ DISALLOW_COPY_AND_ASSIGN(AddTypeAssertionsReducer);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_ADD_TYPE_ASSERTIONS_REDUCER_H_
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index d93053c64b..88a9c52a33 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -130,6 +130,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
case Constant::kInt64:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(dcarney): loading RPO constants on arm.
case Constant::kRpoNumber:
@@ -308,9 +309,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -319,9 +320,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
}
}
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter& i, Register address) {
+void ComputePoisonedAddressForLoad(
+ CodeGenerator* codegen, InstructionCode opcode,
+ ArmOperandConverter& i, // NOLINT(runtime/references)
+ Register address) {
DCHECK_EQ(kMemoryAccessPoisoned,
static_cast<MemoryAccessMode>(MiscField::decode(opcode)));
switch (AddressingModeField::decode(opcode)) {
@@ -711,8 +713,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -879,23 +881,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r1);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1752,6 +1752,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArmDmbIsh: {
+ __ dmb(ISH);
+ break;
+ }
case kArmDsbIsb: {
__ dsb(SY);
__ isb(SY);
@@ -2588,6 +2592,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU32, scratch, src.low(), src.high());
__ vpmax(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x4AllTrue: {
@@ -2597,6 +2603,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU32, scratch, src.low(), src.high());
__ vpmin(NeonU32, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AnyTrue: {
@@ -2607,6 +2615,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmax(NeonU16, scratch, scratch, scratch);
__ vpmax(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x8AllTrue: {
@@ -2617,6 +2627,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU16, scratch, scratch, scratch);
__ vpmin(NeonU16, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS16, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AnyTrue: {
@@ -2631,6 +2643,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// kDoubleRegZero is not changed, since it is 0.
__ vtst(Neon32, q_scratch, q_scratch, q_scratch);
__ ExtractLane(i.OutputRegister(), d_scratch, NeonS32, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kArmS1x16AllTrue: {
@@ -2642,6 +2656,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpmin(NeonU8, scratch, scratch, scratch);
__ vpmin(NeonU8, scratch, scratch, scratch);
__ ExtractLane(i.OutputRegister(), scratch, NeonS8, 0);
+ __ cmp(i.OutputRegister(), Operand(0));
+ __ mov(i.OutputRegister(), Operand(1), LeaveCC, ne);
break;
}
case kWord32AtomicLoadInt8:
@@ -2901,7 +2917,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2993,8 +3009,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(lr, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ } else {
+ __ Push(lr, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3025,8 +3047,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3074,7 +3096,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 722502edc7..165ca39f9d 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -126,6 +126,7 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke) \
V(ArmPeek) \
+ V(ArmDmbIsh) \
V(ArmDsbIsb) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index 211abd85b8..41d7b4055f 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -275,6 +275,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmStr:
case kArmPush:
case kArmPoke:
+ case kArmDmbIsh:
case kArmDsbIsb:
case kArmWord32AtomicPairStore:
case kArmWord32AtomicPairAdd:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 678d75ae5e..06aba4491a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -441,9 +441,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2020,6 +2020,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmDmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArmOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 53864ad2e9..c71a63cc3d 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -224,6 +224,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
return Operand(Operand::EmbeddedNumber(constant.ToFloat64().value()));
case Constant::kExternalReference:
return Operand(constant.ToExternalReference());
+ case Constant::kCompressedHeapObject: // Fall through.
case Constant::kHeapObject:
return Operand(constant.ToHeapObject());
case Constant::kDelayedStringConstant:
@@ -375,9 +376,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ Arm64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -621,8 +622,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -793,19 +794,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0).is(x1));
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ Debug("kArchDebugAbort", 0, BREAK);
+ __ Debug("kArchAbortCSAAssert", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -867,9 +866,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
@@ -1629,6 +1625,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64DmbIsh:
+ __ Dmb(InnerShareable, BarrierAll);
+ break;
case kArm64DsbIsb:
__ Dsb(FullSystem, BarrierAll);
__ Isb();
@@ -2200,6 +2199,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
VRegister temp = scope.AcquireV(format); \
__ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
__ Umov(i.OutputRegister32(), temp, 0); \
+ __ Cmp(i.OutputRegister32(), 0); \
+ __ Cset(i.OutputRegister32(), ne); \
break; \
}
SIMD_REDUCE_OP_CASE(kArm64S1x4AnyTrue, Umaxv, kFormatS, 4S);
@@ -2399,12 +2400,14 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
__ Adr(temp, &table);
__ Add(temp, temp, Operand(input, UXTW, 2));
__ Br(temp);
- __ StartBlockPools();
- __ Bind(&table);
- for (size_t index = 0; index < case_count; ++index) {
- __ B(GetLabel(i.InputRpo(index + 2)));
+ {
+ TurboAssembler::BlockPoolsScope block_pools(tasm(),
+ case_count * kInstrSize);
+ __ Bind(&table);
+ for (size_t index = 0; index < case_count; ++index) {
+ __ B(GetLabel(i.InputRpo(index + 2)));
+ }
}
- __ EndBlockPools();
}
void CodeGenerator::FinishFrame(Frame* frame) {
@@ -2437,8 +2440,8 @@ void CodeGenerator::AssembleConstructFrame() {
// The frame has been previously padded in CodeGenerator::FinishFrame().
DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
call_descriptor->CalleeSavedRegisters());
@@ -2577,7 +2580,17 @@ void CodeGenerator::AssembleConstructFrame() {
MemOperand(fp, WasmCompiledFrameConstants::kWasmInstanceOffset));
} break;
case CallDescriptor::kCallAddress:
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ required_slots += 2; // marker + saved c_entry_fp.
+ }
__ Claim(required_slots);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
+ __ Str(scratch,
+ MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
+ }
break;
default:
UNREACHABLE();
@@ -2654,7 +2667,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
__ Ret();
}
-void CodeGenerator::FinishCode() { __ CheckConstPool(true, false); }
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
@@ -2669,6 +2682,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else {
__ Mov(dst, src_object);
}
+ } else if (src.type() == Constant::kCompressedHeapObject) {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ // TODO(v8:8977): Even though this mov happens on 32 bits (Note the
+ // .W()) and we are passing along the RelocInfo, we still haven't made
+ // the address embedded in the code-stream actually be compressed.
+ __ Mov(dst.W(),
+ Immediate(src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT));
+ }
} else {
__ Mov(dst, g.ToImmediate(source));
}
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 4b7b017111..1c4c0e3335 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -171,6 +171,7 @@ namespace compiler {
V(Arm64CompressSigned) \
V(Arm64CompressPointer) \
V(Arm64CompressAny) \
+ V(Arm64DmbIsh) \
V(Arm64DsbIsb) \
V(Arm64F32x4Splat) \
V(Arm64F32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 502b9d7d82..8344887ec2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -319,6 +319,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64DmbIsh:
case kArm64DsbIsb:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 69d82b4993..a953e35a66 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -535,9 +535,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -676,10 +676,11 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
- // must check kArithmeticImm as well as kLoadStoreImm64.
- if (g.CanBeImmediate(index, kArithmeticImm) &&
- g.CanBeImmediate(index, kLoadStoreImm64)) {
+ // OutOfLineRecordWrite uses the index in an add or sub instruction, but we
+ // can trust the assembler to generate extra instructions if the index does
+ // not fit into add or sub. So here only check the immediate for a store.
+ if (g.CanBeImmediate(index, COMPRESS_POINTERS_BOOL ? kLoadStoreImm32
+ : kLoadStoreImm64)) {
inputs[input_count++] = g.UseImmediate(index);
addressing_mode = kMode_MRI;
} else {
@@ -1599,7 +1600,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// 32-bit operations will write their result in a W register (implicitly
// clearing the top 32-bit of the corresponding X register) so the
// zero-extension is a no-op.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
case IrOpcode::kLoad: {
@@ -1610,7 +1611,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
default:
break;
@@ -1646,29 +1647,75 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ InstructionCode opcode = kArm64LdrDecompressAnyTagged;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressed;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressAny, g.DefineAsRegister(node), g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressPointer, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ InstructionCode opcode = kArm64LdrDecompressTaggedPointer;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedPointer;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressPointer, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
Arm64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kArm64DecompressSigned, g.DefineAsRegister(node), g.UseRegister(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ InstructionCode opcode = kArm64LdrDecompressTaggedSigned;
+ if (value->opcode() == IrOpcode::kPoisonedLoad) {
+ CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
+ opcode |= MiscField::encode(kMemoryAccessPoisoned);
+ }
+ ImmediateMode immediate_mode = kLoadStoreImm32;
+ MachineRepresentation rep = MachineRepresentation::kCompressedSigned;
+ EmitLoad(this, value, opcode, immediate_mode, rep, node);
+ } else {
+ Emit(kArm64DecompressSigned, g.DefineAsRegister(node),
+ g.UseRegister(value));
+ }
}
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
- Node* value = node->InputAt(0);
// The top 32 bits in the 64-bit register will be undefined, and
// must not be used by a dependent node.
- Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
+ EmitIdentity(node);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
@@ -2451,7 +2498,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
size_t table_time_cost = 3;
size_t lookup_space_cost = 3 + 2 * sw.case_count();
size_t lookup_time_cost = sw.case_count();
- if (sw.case_count() > 0 &&
+ if (sw.case_count() > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
sw.min_value() > std::numeric_limits<int32_t>::min() &&
@@ -2755,6 +2802,11 @@ void InstructionSelector::VisitFloat64Mul(Node* node) {
return VisitRRR(this, kArm64Float64Mul, node);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64DmbIsh, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index bb83a8497b..9ce92dadaa 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -1210,6 +1210,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
+ case Constant::kCompressedHeapObject:
+ DCHECK_EQ(MachineRepresentation::kCompressed, type.representation());
+ literal = DeoptimizationLiteral(constant.ToHeapObject());
+ break;
case Constant::kDelayedStringConstant:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
literal = DeoptimizationLiteral(constant.ToDelayedStringConstant());
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 0e61c22cbb..ed4be7a47c 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -81,6 +81,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
+ case Constant::kCompressedHeapObject:
+ break;
case Constant::kDelayedStringConstant:
return Immediate::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
@@ -462,6 +464,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \
}
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ Register dst = i.OutputRegister(); \
+ Operand src = i.InputOperand(0); \
+ Register tmp = i.TempRegister(0); \
+ __ mov(tmp, Immediate(1)); \
+ __ xor_(dst, dst); \
+ __ Pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, src); \
+ __ Ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmov(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
@@ -674,8 +689,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -870,17 +885,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == edx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
break;
@@ -1204,7 +1217,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchWordPoisonOnSpeculation:
// TODO(860429): Remove remaining poisoning infrastructure on ia32.
UNREACHABLE();
- case kLFence:
+ case kIA32MFence:
+ __ mfence();
+ break;
+ case kIA32LFence:
__ lfence();
break;
case kSSEFloat32Cmp:
@@ -3663,18 +3679,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmov(zero, dst, tmp);
break;
}
+ // Need to split up all the different lane structures because the
+ // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+ // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+ // respectively.
case kIA32S1x4AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqd);
+ break;
case kIA32S1x8AllTrue:
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
case kIA32S1x16AllTrue: {
- Register dst = i.OutputRegister();
- Operand src = i.InputOperand(0);
- Register tmp = i.TempRegister(0);
- __ mov(tmp, Immediate(1));
- __ xor_(dst, dst);
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(kScratchDoubleReg, src);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmov(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
case kIA32StackCheck: {
@@ -4224,6 +4240,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ push(ebp);
__ mov(ebp, esp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -4254,8 +4275,8 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -4629,6 +4650,7 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
} // namespace compiler
} // namespace internal
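
The ASSEMBLE_SIMD_ALL_TRUE sequence introduced above reduces a SIMD value to a single boolean: compare every lane against zero, ptest the comparison mask, and cmov 1 into the destination only when no lane was zero. As a minimal scalar sketch of that semantics (illustrative C++, not V8 code; AllTrue32x4 and the 4 x 32-bit lane layout stand in for the kIA32S1x4AllTrue case):

#include <array>
#include <cstdint>

// Returns 1 iff every lane is non-zero, mirroring pcmpeqd/ptest/cmov: a lane
// equal to zero makes the lane-wise equality mask non-zero, clearing ZF, so
// the cmov that would write 1 is skipped and the result stays 0.
int AllTrue32x4(const std::array<uint32_t, 4>& lanes) {
  for (uint32_t lane : lanes) {
    if (lane == 0) return 0;
  }
  return 1;
}
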
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 60ed1cc29c..56dea82fe2 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -44,7 +44,8 @@ namespace compiler {
V(IA32Tzcnt) \
V(IA32Popcnt) \
V(IA32Bswap) \
- V(LFence) \
+ V(IA32MFence) \
+ V(IA32LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index f2d5cc0d17..15f69b991c 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -365,7 +365,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32PushFloat64:
case kIA32PushSimd128:
case kIA32Poke:
- case kLFence:
+ case kIA32MFence:
+ case kIA32LFence:
return kHasSideEffect;
case kIA32Word32AtomicPairLoad:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f81b88823e..e1fc66b4ba 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -272,9 +272,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1593,6 +1593,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 068164b57e..1085de2196 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -82,7 +82,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchDebugAbort) \
+ V(ArchAbortCSAAssert) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index b0637c175d..538af71bb4 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -298,7 +298,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchTailCallCodeObject:
case kArchTailCallAddress:
case kArchTailCallWasm:
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
case kArchDebugBreak:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/instruction-selector-impl.h b/deps/v8/src/compiler/backend/instruction-selector-impl.h
index 21edc2f503..a3f62e7ba4 100644
--- a/deps/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/backend/instruction-selector-impl.h
@@ -29,8 +29,8 @@ inline bool operator<(const CaseInfo& l, const CaseInfo& r) {
// Helper struct containing data about a table or lookup switch.
class SwitchInfo {
public:
- SwitchInfo(ZoneVector<CaseInfo>& cases, int32_t min_value, int32_t max_value,
- BasicBlock* default_branch)
+ SwitchInfo(ZoneVector<CaseInfo>& cases, // NOLINT(runtime/references)
+ int32_t min_value, int32_t max_value, BasicBlock* default_branch)
: cases_(cases),
min_value_(min_value),
max_value_(max_value),
@@ -109,13 +109,9 @@ class OperandGenerator {
}
InstructionOperand DefineAsConstant(Node* node) {
- return DefineAsConstant(node, ToConstant(node));
- }
-
- InstructionOperand DefineAsConstant(Node* node, Constant constant) {
selector()->MarkAsDefined(node);
int virtual_register = GetVReg(node);
- sequence()->AddConstant(virtual_register, constant);
+ sequence()->AddConstant(virtual_register, ToConstant(node));
return ConstantOperand(virtual_register);
}
@@ -326,6 +322,8 @@ class OperandGenerator {
}
case IrOpcode::kHeapConstant:
return Constant(HeapConstantOf(node->op()));
+ case IrOpcode::kCompressedHeapConstant:
+ return Constant(HeapConstantOf(node->op()), true);
case IrOpcode::kDelayedStringConstant:
return Constant(StringConstantBaseOf(node->op()));
case IrOpcode::kDeadValue: {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index 2b748a188b..11ba910405 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -8,6 +8,7 @@
#include "src/base/adapters.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
@@ -24,7 +25,7 @@ InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode, Features features,
EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
@@ -54,7 +55,8 @@ InstructionSelector::InstructionSelector(
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
- trace_turbo_(trace_turbo) {
+ trace_turbo_(trace_turbo),
+ tick_counter_(tick_counter) {
instructions_.reserve(node_count);
continuation_inputs_.reserve(5);
continuation_outputs_.reserve(2);
@@ -1078,7 +1080,8 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
- node->opcode() == IrOpcode::kProtectedStore) {
+ node->opcode() == IrOpcode::kProtectedStore ||
+ node->opcode() == IrOpcode::kMemoryBarrier) {
++effect_level;
}
}
@@ -1251,6 +1254,7 @@ void InstructionSelector::MarkPairProjectionsAsWord32(Node* node) {
}
void InstructionSelector::VisitNode(Node* node) {
+ tick_counter_->DoTick();
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
switch (node->opcode()) {
case IrOpcode::kStart:
@@ -1301,6 +1305,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsReference(node), VisitConstant(node);
+ case IrOpcode::kCompressedHeapConstant:
+ return MarkAsCompressed(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
double value = OpParameter<double>(node->op());
if (!IsSmiDouble(value)) MarkAsReference(node);
@@ -1324,8 +1330,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kDebugAbort:
- VisitDebugAbort(node);
+ case IrOpcode::kAbortCSAAssert:
+ VisitAbortCSAAssert(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -1474,6 +1480,7 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUint64Mod:
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
return MarkAsRepresentation(MachineType::PointerRepresentation(), node),
VisitBitcastTaggedToWord(node);
case IrOpcode::kBitcastWordToTagged:
@@ -1734,6 +1741,8 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
return VisitWord32PairSar(node);
+ case IrOpcode::kMemoryBarrier:
+ return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -1808,6 +1817,24 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kF64x2Splat:
+ return MarkAsSimd128(node), VisitF64x2Splat(node);
+ case IrOpcode::kF64x2ExtractLane:
+ return MarkAsFloat64(node), VisitF64x2ExtractLane(node);
+ case IrOpcode::kF64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitF64x2ReplaceLane(node);
+ case IrOpcode::kF64x2Abs:
+ return MarkAsSimd128(node), VisitF64x2Abs(node);
+ case IrOpcode::kF64x2Neg:
+ return MarkAsSimd128(node), VisitF64x2Neg(node);
+ case IrOpcode::kF64x2Eq:
+ return MarkAsSimd128(node), VisitF64x2Eq(node);
+ case IrOpcode::kF64x2Ne:
+ return MarkAsSimd128(node), VisitF64x2Ne(node);
+ case IrOpcode::kF64x2Lt:
+ return MarkAsSimd128(node), VisitF64x2Lt(node);
+ case IrOpcode::kF64x2Le:
+ return MarkAsSimd128(node), VisitF64x2Le(node);
case IrOpcode::kF32x4Splat:
return MarkAsSimd128(node), VisitF32x4Splat(node);
case IrOpcode::kF32x4ExtractLane:
@@ -1846,6 +1873,38 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Lt(node);
case IrOpcode::kF32x4Le:
return MarkAsSimd128(node), VisitF32x4Le(node);
+ case IrOpcode::kI64x2Splat:
+ return MarkAsSimd128(node), VisitI64x2Splat(node);
+ case IrOpcode::kI64x2ExtractLane:
+ return MarkAsWord64(node), VisitI64x2ExtractLane(node);
+ case IrOpcode::kI64x2ReplaceLane:
+ return MarkAsSimd128(node), VisitI64x2ReplaceLane(node);
+ case IrOpcode::kI64x2Neg:
+ return MarkAsSimd128(node), VisitI64x2Neg(node);
+ case IrOpcode::kI64x2Shl:
+ return MarkAsSimd128(node), VisitI64x2Shl(node);
+ case IrOpcode::kI64x2ShrS:
+ return MarkAsSimd128(node), VisitI64x2ShrS(node);
+ case IrOpcode::kI64x2Add:
+ return MarkAsSimd128(node), VisitI64x2Add(node);
+ case IrOpcode::kI64x2Sub:
+ return MarkAsSimd128(node), VisitI64x2Sub(node);
+ case IrOpcode::kI64x2Mul:
+ return MarkAsSimd128(node), VisitI64x2Mul(node);
+ case IrOpcode::kI64x2Eq:
+ return MarkAsSimd128(node), VisitI64x2Eq(node);
+ case IrOpcode::kI64x2Ne:
+ return MarkAsSimd128(node), VisitI64x2Ne(node);
+ case IrOpcode::kI64x2GtS:
+ return MarkAsSimd128(node), VisitI64x2GtS(node);
+ case IrOpcode::kI64x2GeS:
+ return MarkAsSimd128(node), VisitI64x2GeS(node);
+ case IrOpcode::kI64x2ShrU:
+ return MarkAsSimd128(node), VisitI64x2ShrU(node);
+ case IrOpcode::kI64x2GtU:
+ return MarkAsSimd128(node), VisitI64x2GtU(node);
+ case IrOpcode::kI64x2GeU:
+ return MarkAsSimd128(node), VisitI64x2GeU(node);
case IrOpcode::kI32x4Splat:
return MarkAsSimd128(node), VisitI32x4Splat(node);
case IrOpcode::kI32x4ExtractLane:
@@ -2028,6 +2087,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS8x16Shuffle:
return MarkAsSimd128(node), VisitS8x16Shuffle(node);
+ case IrOpcode::kS1x2AnyTrue:
+ return MarkAsWord32(node), VisitS1x2AnyTrue(node);
+ case IrOpcode::kS1x2AllTrue:
+ return MarkAsWord32(node), VisitS1x2AllTrue(node);
case IrOpcode::kS1x4AnyTrue:
return MarkAsWord32(node), VisitS1x4AnyTrue(node);
case IrOpcode::kS1x4AllTrue:
@@ -2489,6 +2552,36 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
+#if !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Abs(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Lt(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Le(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Neg(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AnyTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitS1x2AllTrue(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Shl(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Add(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Sub(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Mul(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Eq(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2Ne(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GtU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI64x2GeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64
+
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
void InstructionSelector::VisitParameter(Node* node) {
@@ -2962,7 +3055,7 @@ void InstructionSelector::CanonicalizeShuffle(bool inputs_equal,
void InstructionSelector::CanonicalizeShuffle(Node* node, uint8_t* shuffle,
bool* is_swizzle) {
// Get raw shuffle indices.
- memcpy(shuffle, OpParameter<uint8_t*>(node->op()), kSimd128Size);
+ memcpy(shuffle, S8x16ShuffleOf(node->op()), kSimd128Size);
bool needs_swap;
bool inputs_equal = GetVirtualRegister(node->InputAt(0)) ==
GetVirtualRegister(node->InputAt(1));
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 4f6b1c5971..16f88bb516 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -19,6 +19,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -266,7 +269,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
SourcePositionTable* source_positions, Frame* frame,
- EnableSwitchJumpTable enable_switch_jump_table,
+ EnableSwitchJumpTable enable_switch_jump_table, TickCounter* tick_counter,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures(),
EnableScheduling enable_scheduling = FLAG_turbo_instruction_scheduling
@@ -496,11 +499,15 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
VectorSlotPair const& feedback,
Node* frame_state);
- void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
- void EmitLookupSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
- void EmitBinarySearchSwitch(const SwitchInfo& sw,
- InstructionOperand& value_operand);
+ void EmitTableSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& index_operand); // NOLINT(runtime/references)
+ void EmitLookupSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
+ void EmitBinarySearchSwitch(
+ const SwitchInfo& sw,
+ InstructionOperand& value_operand); // NOLINT(runtime/references)
void TryRename(InstructionOperand* op);
int GetRename(int virtual_register);
@@ -604,6 +611,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
+  // Visit the load node with the value and opcode to replace it with.
+ void VisitLoad(Node* node, Node* value, InstructionCode opcode);
void VisitFinishRegion(Node* node);
void VisitParameter(Node* node);
void VisitIfException(Node* node);
@@ -772,6 +781,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
EnableTraceTurboJson trace_turbo_;
+ TickCounter* const tick_counter_;
};
} // namespace compiler
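
VisitNode now calls tick_counter_->DoTick() for every visited node, so the TickCounter threaded through the constructor above gives long instruction-selection runs a per-node heartbeat. A simplified counting sketch (illustrative C++, not V8's TickCounter API; SimpleTickCounter is a made-up stand-in):

#include <cstdint>

// Stand-in for a per-pass tick counter: each visited node bumps the count,
// which a caller can sample to observe or bound long-running passes.
class SimpleTickCounter {
 public:
  void DoTick() { ++ticks_; }
  uint64_t ticks() const { return ticks_; }

 private:
  uint64_t ticks_ = 0;
};
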
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index c52dca61a1..09c7fe22c5 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -530,7 +530,7 @@ Constant::Constant(RelocatablePtrConstantInfo info) {
}
Handle<HeapObject> Constant::ToHeapObject() const {
- DCHECK_EQ(kHeapObject, type());
+ DCHECK(kHeapObject == type() || kCompressedHeapObject == type());
Handle<HeapObject> value(
reinterpret_cast<Address*>(static_cast<intptr_t>(value_)));
return value;
@@ -561,7 +561,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
return os << constant.ToFloat64().value();
case Constant::kExternalReference:
return os << constant.ToExternalReference().address();
- case Constant::kHeapObject:
+ case Constant::kHeapObject: // Fall through.
+ case Constant::kCompressedHeapObject:
return os << Brief(*constant.ToHeapObject());
case Constant::kRpoNumber:
return os << "RPO" << constant.ToRpoNumber().ToInt();
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 61875a1a17..9b32204055 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -1007,6 +1007,7 @@ class V8_EXPORT_PRIVATE Constant final {
kFloat32,
kFloat64,
kExternalReference,
+ kCompressedHeapObject,
kHeapObject,
kRpoNumber,
kDelayedStringConstant
@@ -1018,8 +1019,9 @@ class V8_EXPORT_PRIVATE Constant final {
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref.address())) {}
- explicit Constant(Handle<HeapObject> obj)
- : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+ explicit Constant(Handle<HeapObject> obj, bool is_compressed = false)
+ : type_(is_compressed ? kCompressedHeapObject : kHeapObject),
+ value_(bit_cast<intptr_t>(obj)) {}
explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
explicit Constant(const StringConstantBase* str)
: type_(kDelayedStringConstant), value_(bit_cast<intptr_t>(str)) {}
diff --git a/deps/v8/src/compiler/backend/jump-threading.h b/deps/v8/src/compiler/backend/jump-threading.h
index e23dd45359..ce60ebcb2e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.h
+++ b/deps/v8/src/compiler/backend/jump-threading.h
@@ -17,14 +17,17 @@ class V8_EXPORT_PRIVATE JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
- InstructionSequence* code, bool frame_at_start);
+ static bool ComputeForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& result, // NOLINT(runtime/references)
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(Zone* local_zone,
- ZoneVector<RpoNumber>& forwarding,
- InstructionSequence* code);
+ static void ApplyForwarding(
+ Zone* local_zone,
+ ZoneVector<RpoNumber>& forwarding, // NOLINT(runtime/references)
+ InstructionSequence* code);
};
} // namespace compiler
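
The // NOLINT(runtime/references) annotations added here and in the other backend files silence cpplint's runtime/references check, which flags mutable non-const reference parameters and prefers pointers. A standalone sketch of the two spellings (illustrative C++ outside V8; FillByPointer and FillByReference are made-up names):

#include <vector>

// cpplint-preferred spelling: mutable out-parameters are passed by pointer.
void FillByPointer(std::vector<int>* out) { out->push_back(1); }

// Reference spelling kept in this patch; the annotation suppresses the lint.
void FillByReference(std::vector<int>& out) {  // NOLINT(runtime/references)
  out.push_back(1);
}
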
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
index 6ed0416045..0a0aadfad1 100644
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ b/deps/v8/src/compiler/backend/live-range-separator.cc
@@ -9,15 +9,16 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
namespace {
void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
- LifetimePosition first_cut, LifetimePosition last_cut) {
+ LifetimePosition first_cut, LifetimePosition last_cut,
+ bool trace_alloc) {
DCHECK(!range->IsSplinter());
// We can ignore ranges that live solely in deferred blocks.
// If a range ends right at the end of a deferred block, it is marked by
@@ -49,9 +50,10 @@ void CreateSplinter(TopLevelLiveRange* range, RegisterAllocationData* data,
range->SetSplinter(splinter);
}
Zone* zone = data->allocation_zone();
- TRACE("creating splinter %d for range %d between %d and %d\n",
- range->splinter()->vreg(), range->vreg(), start.ToInstructionIndex(),
- end.ToInstructionIndex());
+ TRACE_COND(trace_alloc,
+ "creating splinter %d for range %d between %d and %d\n",
+ range->splinter()->vreg(), range->vreg(),
+ start.ToInstructionIndex(), end.ToInstructionIndex());
range->Splinter(start, end, zone);
}
}
@@ -102,7 +104,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
current_block->last_instruction_index());
} else {
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, last_cut);
+ CreateSplinter(range, data, first_cut, last_cut,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -116,7 +119,8 @@ void SplinterLiveRange(TopLevelLiveRange* range, RegisterAllocationData* data) {
// have to connect blocks anyway, so we can also splinter to the end of the
// block, too.
if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, interval_end);
+ CreateSplinter(range, data, first_cut, interval_end,
+ data->is_trace_alloc());
first_cut = LifetimePosition::Invalid();
last_cut = LifetimePosition::Invalid();
}
@@ -186,7 +190,7 @@ void LiveRangeMerger::Merge() {
}
}
-#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
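
TRACE_COND replaces the flag-gated TRACE so the caller decides whether to print, here by passing data->is_trace_alloc() instead of rereading FLAG_trace_alloc at every call site. A minimal sketch of the pattern (illustrative C++ using printf in place of V8's PrintF):

#include <cstdio>

#define TRACE_COND(cond, ...)      \
  do {                             \
    if (cond) printf(__VA_ARGS__); \
  } while (false)

int main() {
  bool trace_alloc = true;  // stand-in for data->is_trace_alloc()
  TRACE_COND(trace_alloc, "creating splinter %d for range %d\n", 7, 3);
  return 0;
}
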
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 1f79386821..5cec4a8a16 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -80,6 +80,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kInt64:
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
@@ -264,8 +265,9 @@ Condition FlagsConditionToConditionTst(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -301,9 +303,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -662,8 +664,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -778,6 +780,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sw(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -785,6 +794,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -816,22 +827,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
@@ -1611,6 +1620,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMipsSync: {
+ __ sync();
+ break;
+ }
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
@@ -3157,7 +3170,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3376,8 +3389,14 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3387,7 +3406,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3397,12 +3417,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ lw(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Subu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3564,6 +3588,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
break;
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index ba64e59429..44e53ac044 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -134,6 +134,7 @@ namespace compiler {
V(MipsStackClaim) \
V(MipsSeb) \
V(MipsSeh) \
+ V(MipsSync) \
V(MipsS128Zero) \
V(MipsI32x4Splat) \
V(MipsI32x4ExtractLane) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 26a3e808cc..92ab3f9344 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -284,6 +284,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsUsh:
case kMipsUsw:
case kMipsUswc1:
+ case kMipsSync:
case kMipsWord32AtomicPairStore:
case kMipsWord32AtomicPairAdd:
case kMipsWord32AtomicPairSub:
@@ -1352,7 +1353,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency((instr->InputCount() - 2) / 2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 0c7299d451..452e92a174 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -274,9 +274,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1775,6 +1775,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
arraysize(temps), temps);
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsSync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
MipsOperandGenerator g(this);
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 5cd9bc54eb..f746b52df6 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -82,6 +82,7 @@ class MipsOperandConverter final : public InstructionOperandConverter {
case Constant::kFloat64:
return Operand::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
@@ -277,8 +278,9 @@ Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
UNREACHABLE();
}
-FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
- FlagsCondition condition) {
+FPUCondition FlagsConditionToConditionCmpFPU(
+ bool& predicate, // NOLINT(runtime/references)
+ FlagsCondition condition) {
switch (condition) {
case kEqual:
predicate = true;
@@ -309,9 +311,9 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ MipsOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -634,8 +636,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -756,6 +758,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ LoadAddress(kScratchReg, &return_location);
+ __ sd(kScratchReg,
+ MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -763,6 +772,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -794,22 +805,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == a0);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
@@ -1786,6 +1795,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Usdc1(ft, i.MemoryOperand(), kScratchReg);
break;
}
+ case kMips64Sync: {
+ __ sync();
+ break;
+ }
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
__ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
@@ -3304,7 +3317,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3535,8 +3548,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(ra, fp);
- __ mov(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3546,7 +3565,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3556,12 +3576,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ ld(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Dsubu(sp, sp, Operand(kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3723,6 +3747,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 24f01b1af1..e375ee8d07 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -163,6 +163,7 @@ namespace compiler {
V(Mips64StackClaim) \
V(Mips64Seb) \
V(Mips64Seh) \
+ V(Mips64Sync) \
V(Mips64AssertEqual) \
V(Mips64S128Zero) \
V(Mips64I32x4Splat) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 499a3da05a..4dcafe4197 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -318,6 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ush:
case kMips64Usw:
case kMips64Uswc1:
+ case kMips64Sync:
case kMips64Word64AtomicStoreWord8:
case kMips64Word64AtomicStoreWord16:
case kMips64Word64AtomicStoreWord32:
@@ -1263,7 +1264,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchLookupSwitchLatency(instr);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 9768a7da9b..95f11ebed1 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -334,9 +334,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1946,7 +1946,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
// in those cases. Unfortunately, the solution is not complete because
// it might skip cases where Word32 full compare is needed, so
// basically it is a hack.
+  // When calling a host function in the simulator, if the function returns
+  // an int32 value, the simulator does not sign-extend it to int64 because
+  // it cannot tell whether the function returns an int32 or an int64, so we
+  // need to do a full word32 compare in this case.
+#ifndef USE_SIMULATOR
if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
+#else
+ if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
+ node->InputAt(0)->opcode() == IrOpcode::kCall ||
+      node->InputAt(1)->opcode() == IrOpcode::kCall) {
+#endif
VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
} else {
VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
@@ -2398,6 +2408,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
ArchOpcode opcode = kArchNop;
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 30605df270..5289812cb5 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -79,6 +79,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -262,8 +263,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ PPCOperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -877,8 +879,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1019,6 +1021,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+ constexpr int offset = 12;
+ if (isWasmCapiFunction) {
+ __ mflr(kScratchReg);
+ __ bind(&start_call);
+ __ LoadPC(r0);
+ __ addi(r0, r0, Operand(offset));
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ __ mtlr(r0);
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1026,6 +1040,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+    // TODO(miladfar): In the above block, r0 must be populated with the
+    // strictly-correct PC, which is the return address at this spot. The
+    // offset is currently set to 12, counted from where the label is bound
+    // to this spot. If the check fails, replace the offset with the value
+    // suggested. More info on f5ab7d3.
+ if (isWasmCapiFunction)
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1060,22 +1083,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r4);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -1174,6 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kPPC_Sync: {
+ __ sync();
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -2150,7 +2175,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -2304,14 +2329,20 @@ void CodeGenerator::AssembleConstructFrame() {
auto call_descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ mflr(r0);
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(r0, fp, kConstantPoolRegister);
- // Adjust FP to point to saved FP.
- __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
} else {
- __ Push(r0, fp);
- __ mr(fp, sp);
+ __ mflr(r0);
+ if (FLAG_enable_embedded_constant_pool) {
+ __ Push(r0, fp, kConstantPoolRegister);
+ // Adjust FP to point to saved FP.
+ __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+ } else {
+ __ Push(r0, fp);
+ __ mr(fp, sp);
+ }
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
@@ -2325,7 +2356,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -2335,12 +2367,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ addi(sp, sp, Operand(-kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -2389,7 +2425,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -2554,6 +2590,8 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
break;
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index a34a09b796..f37529bd88 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -13,6 +13,7 @@ namespace compiler {
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
V(PPC_Peek) \
+ V(PPC_Sync) \
V(PPC_And) \
V(PPC_AndComplement) \
V(PPC_Or) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index e5f7d7e45a..61c2d2be3b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -143,6 +143,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Push:
case kPPC_PushFrame:
case kPPC_StoreToStackSlot:
+ case kPPC_Sync:
return kHasSideEffect;
case kPPC_AtomicStoreUint8:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bb503763c2..bfc77b9412 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -173,9 +173,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -1853,6 +1853,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
g.UseRegister(left), g.UseRegister(right));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Sync, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index 57ea2c1a26..44701f8159 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -9,6 +9,7 @@
#include "src/base/adapters.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
@@ -17,11 +18,13 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+#define TRACE_COND(cond, ...) \
+ do { \
+ if (cond) PrintF(__VA_ARGS__); \
} while (false)
+#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
+
namespace {
static constexpr int kFloat32Bit =
@@ -1119,8 +1122,9 @@ void TopLevelLiveRange::Verify() const {
}
}
-void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
- TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
+void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
+ start.value());
DCHECK_NOT_NULL(first_interval_);
DCHECK(first_interval_->start() <= start);
DCHECK(start < first_interval_->end());
@@ -1128,9 +1132,10 @@ void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
}
void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
+ start.value(), end.value());
LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
@@ -1148,9 +1153,10 @@ void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
}
void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
- LifetimePosition end, Zone* zone) {
- TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
- end.value());
+ LifetimePosition end, Zone* zone,
+ bool trace_alloc) {
+ TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
+ start.value(), end.value());
if (first_interval_ == nullptr) {
UseInterval* interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
@@ -1173,9 +1179,10 @@ void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
}
}
-void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
+void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
LifetimePosition pos = use_pos->pos();
- TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
+ TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
+ pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
UsePosition* current = first_pos_;
@@ -1309,13 +1316,8 @@ void LinearScanAllocator::PrintRangeRow(std::ostream& os,
if (range->spilled()) {
prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
} else {
- const char* reg_name;
- if (range->assigned_register() == kUnassignedRegister) {
- reg_name = "???";
- } else {
- reg_name = RegisterName(range->assigned_register());
- }
- prefix = snprintf(buffer, max_prefix_length, "|%s", reg_name);
+ prefix = snprintf(buffer, max_prefix_length, "|%s",
+ RegisterName(range->assigned_register()));
}
os << buffer;
position += std::min(prefix, max_prefix_length - 1);
@@ -1469,7 +1471,7 @@ void RegisterAllocationData::PhiMapValue::CommitAssignment(
RegisterAllocationData::RegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, RegisterAllocationFlags flags,
- const char* debug_name)
+ TickCounter* tick_counter, const char* debug_name)
: allocation_zone_(zone),
frame_(frame),
code_(code),
@@ -1496,7 +1498,8 @@ RegisterAllocationData::RegisterAllocationData(
preassigned_slot_ranges_(zone),
spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
zone),
- flags_(flags) {
+ flags_(flags),
+ tick_counter_(tick_counter) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
@@ -1815,6 +1818,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
void ConstraintBuilder::MeetRegisterConstraints() {
for (InstructionBlock* block : code()->instruction_blocks()) {
+ data_->tick_counter()->DoTick();
MeetRegisterConstraints(block);
}
}
@@ -1973,14 +1977,6 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
- } else if (!code()->IsReference(input_vreg) &&
- code()->IsReference(output_vreg)) {
- // The input is assumed to immediately have a tagged representation,
- // before the pointer map can be used. I.e. the pointer map at the
- // instruction will include the output operand (whose value at the
- // beginning of the instruction is equal to the input operand). If
- // this is not desired, then the pointer map at this instruction needs
- // to be adjusted manually.
}
}
}
@@ -1988,6 +1984,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
void ConstraintBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
+ data_->tick_counter()->DoTick();
ResolvePhis(block);
}
}
@@ -2071,7 +2068,8 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->AddUseInterval(start, end, allocation_zone());
+ range->AddUseInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
}
@@ -2192,16 +2190,18 @@ UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
if (range->IsEmpty() || range->Start() > position) {
// Can happen if there is a definition without use.
- range->AddUseInterval(position, position.NextStart(), allocation_zone());
- range->AddUsePosition(NewUsePosition(position.NextStart()));
+ range->AddUseInterval(position, position.NextStart(), allocation_zone(),
+ data()->is_trace_alloc());
+ range->AddUsePosition(NewUsePosition(position.NextStart()),
+ data()->is_trace_alloc());
} else {
- range->ShortenTo(position);
+ range->ShortenTo(position, data()->is_trace_alloc());
}
if (!operand->IsUnallocated()) return nullptr;
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
UsePosition* use_pos =
NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
return use_pos;
}
@@ -2216,9 +2216,10 @@ UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
if (operand->IsUnallocated()) {
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
- range->AddUsePosition(use_pos);
+ range->AddUsePosition(use_pos, data()->is_trace_alloc());
}
- range->AddUseInterval(block_start, position, allocation_zone());
+ range->AddUseInterval(block_start, position, allocation_zone(),
+ data()->is_trace_alloc());
return use_pos;
}
@@ -2279,7 +2280,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int code = config()->GetAllocatableGeneralCode(i);
TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
@@ -2291,7 +2292,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat64, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
// Clobber fixed float registers on archs with non-simple aliasing.
if (!kSimpleFPAliasing) {
@@ -2304,7 +2305,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat32, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
if (fixed_simd128_live_ranges) {
@@ -2314,7 +2315,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kSimd128, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
- allocation_zone());
+ allocation_zone(), data()->is_trace_alloc());
}
}
}
@@ -2574,7 +2575,8 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
- range->EnsureInterval(start, end, allocation_zone());
+ range->EnsureInterval(start, end, allocation_zone(),
+ data()->is_trace_alloc());
iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
@@ -2588,6 +2590,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
+ data_->tick_counter()->DoTick();
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block, data());
@@ -2607,6 +2610,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Postprocess the ranges.
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
+ data_->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr) continue;
@@ -2773,7 +2777,7 @@ void BundleBuilder::BuildBundles() {
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
- if (out->TryMerge(input_bundle))
+ if (out->TryMerge(input_bundle, data()->is_trace_alloc()))
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else {
@@ -2798,7 +2802,7 @@ bool LiveRangeBundle::TryAddRange(LiveRange* range) {
InsertUses(range->first_interval());
return true;
}
-bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
+bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
if (other == this) return true;
auto iter1 = uses_.begin();
@@ -2810,8 +2814,8 @@ bool LiveRangeBundle::TryMerge(LiveRangeBundle* other) {
} else if (iter2->start > iter1->end) {
++iter1;
} else {
- TRACE("No merge %d:%d %d:%d\n", iter1->start, iter1->end, iter2->start,
- iter2->end);
+ TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
+ iter1->end, iter2->start, iter2->end);
return false;
}
}
@@ -3042,6 +3046,7 @@ void RegisterAllocator::Spill(LiveRange* range, SpillMode spill_mode) {
}
const char* RegisterAllocator::RegisterName(int register_code) const {
+ if (register_code == kUnassignedRegister) return "unassigned";
return mode() == GENERAL_REGISTERS
? i::RegisterName(Register::from_code(register_code))
: i::RegisterName(DoubleRegister::from_code(register_code));
@@ -3408,7 +3413,7 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors(
to_be_live->emplace(val.first, reg);
TRACE("Reset %d as live due vote %zu in %s\n",
val.first->TopLevel()->vreg(), val.second.count,
- reg == kUnassignedRegister ? "unassigned" : RegisterName(reg));
+ RegisterName(reg));
}
}
};
@@ -3477,6 +3482,8 @@ void LinearScanAllocator::UpdateDeferredFixedRanges(SpillMode spill_mode,
RegisterName(other->assigned_register()));
LiveRange* split_off =
other->SplitAt(next_start, data()->allocation_zone());
+ // Try to get the same register after the deferred block.
+ split_off->set_controlflow_hint(other->assigned_register());
DCHECK_NE(split_off, other);
AddToUnhandled(split_off);
update_caches(other);
@@ -3574,7 +3581,7 @@ void LinearScanAllocator::AllocateRegisters() {
SplitAndSpillRangesDefinedByMemoryOperand();
data()->ResetSpillState();
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
@@ -3642,6 +3649,7 @@ void LinearScanAllocator::AllocateRegisters() {
while (!unhandled_live_ranges().empty() ||
(data()->is_turbo_control_flow_aware_allocation() &&
last_block < max_blocks)) {
+ data()->tick_counter()->DoTick();
LiveRange* current = unhandled_live_ranges().empty()
? nullptr
: *unhandled_live_ranges().begin();
@@ -3824,7 +3832,7 @@ void LinearScanAllocator::AllocateRegisters() {
ProcessCurrentRange(current, spill_mode);
}
- if (FLAG_trace_alloc) {
+ if (data()->is_trace_alloc()) {
PrintRangeOverview(std::cout);
}
}
@@ -4557,6 +4565,14 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
LiveRange* third_part =
SplitBetween(second_part, split_start, third_part_end);
+ if (GetInstructionBlock(data()->code(), second_part->Start())
+ ->IsDeferred()) {
+ // Try to use the same register as before.
+ TRACE("Setting control flow hint for %d:%d to %s\n",
+ third_part->TopLevel()->vreg(), third_part->relative_id(),
+ RegisterName(range->controlflow_hint()));
+ third_part->set_controlflow_hint(range->controlflow_hint());
+ }
AddToUnhandled(third_part);
// This can happen, even if we checked for start < end above, as we fiddle
@@ -4601,6 +4617,7 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::DecideSpillingMode() {
if (data()->is_turbo_control_flow_aware_allocation()) {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
int max_blocks = data()->code()->InstructionBlockCount();
if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
// If the range is spilled only in deferred blocks and starts in
@@ -4629,6 +4646,7 @@ void OperandAssigner::DecideSpillingMode() {
void OperandAssigner::AssignSpillSlots() {
for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
if (range != nullptr && range->get_bundle() != nullptr) {
range->get_bundle()->MergeSpillRanges();
}
@@ -4636,6 +4654,7 @@ void OperandAssigner::AssignSpillSlots() {
ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
for (size_t i = 0; i < spill_ranges.size(); ++i) {
+ data()->tick_counter()->DoTick();
SpillRange* range = spill_ranges[i];
if (range == nullptr) continue;
if (range->IsEmpty()) continue;
@@ -4648,6 +4667,7 @@ void OperandAssigner::AssignSpillSlots() {
}
// Allocate slots for the merged spill ranges.
for (SpillRange* range : spill_ranges) {
+ data()->tick_counter()->DoTick();
if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
if (!range->HasSlot()) {
@@ -4660,6 +4680,7 @@ void OperandAssigner::AssignSpillSlots() {
void OperandAssigner::CommitAssignment() {
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ data()->tick_counter()->DoTick();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (top_range == nullptr || top_range->IsEmpty()) continue;
@@ -4859,6 +4880,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
BitVector* live = live_in_sets[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
+ data()->tick_counter()->DoTick();
int vreg = iterator.Current();
LiveRangeBoundArray* array = finder.ArrayFor(vreg);
for (const RpoNumber& pred : block->predecessors()) {
@@ -5130,6 +5152,7 @@ void LiveRangeConnector::CommitSpillsInDeferredBlocks(
}
#undef TRACE
+#undef TRACE_COND
} // namespace compiler
} // namespace internal
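(Editor's note, not part of the commit.) The register-allocator.cc changes above thread two new pieces of state through RegisterAllocationData: a kTraceAllocation flag, so the TRACE/TRACE_COND macros consult per-allocation state instead of reading FLAG_trace_alloc directly, and a TickCounter whose DoTick() is called once per block or live range in the main loops, presumably as a deterministic progress counter for the compiler. A minimal sketch of how a caller could wire these up (variable names are illustrative; the flag is assumed to be derived from FLAG_trace_alloc):

    RegisterAllocationFlags flags;
    if (FLAG_trace_alloc) flags |= RegisterAllocationFlag::kTraceAllocation;
    TickCounter tick_counter;  // DoTick() is invoked from the allocator's hot loops
    RegisterAllocationData* data = new (zone) RegisterAllocationData(
        config, zone, frame, code, flags, &tick_counter, debug_name);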
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index 8929fb2ee6..55f8a8dd1f 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
static const int32_t kUnassignedRegister = RegisterConfiguration::kMaxRegisters;
@@ -175,7 +178,8 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
enum class RegisterAllocationFlag : unsigned {
kTurboControlFlowAwareAllocation = 1 << 0,
- kTurboPreprocessRanges = 1 << 1
+ kTurboPreprocessRanges = 1 << 1,
+ kTraceAllocation = 1 << 2
};
using RegisterAllocationFlags = base::Flags<RegisterAllocationFlag>;
@@ -198,6 +202,10 @@ class RegisterAllocationData final : public ZoneObject {
return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ bool is_trace_alloc() {
+ return flags_ & RegisterAllocationFlag::kTraceAllocation;
+ }
+
static constexpr int kNumberOfFixedRangesPerRegister = 2;
class PhiMapValue : public ZoneObject {
@@ -238,6 +246,7 @@ class RegisterAllocationData final : public ZoneObject {
Zone* allocation_zone, Frame* frame,
InstructionSequence* code,
RegisterAllocationFlags flags,
+ TickCounter* tick_counter,
const char* debug_name = nullptr);
const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
@@ -328,6 +337,8 @@ class RegisterAllocationData final : public ZoneObject {
void ResetSpillState() { spill_state_.clear(); }
+ TickCounter* tick_counter() { return tick_counter_; }
+
private:
int GetNextLiveRangeId();
@@ -354,6 +365,7 @@ class RegisterAllocationData final : public ZoneObject {
RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
RegisterAllocationFlags flags_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -741,7 +753,7 @@ class LiveRangeBundle : public ZoneObject {
: ranges_(zone), uses_(zone), id_(id) {}
bool TryAddRange(LiveRange* range);
- bool TryMerge(LiveRangeBundle* other);
+ bool TryMerge(LiveRangeBundle* other, bool trace_alloc);
ZoneSet<LiveRange*, LiveRangeOrdering> ranges_;
ZoneSet<Range, RangeOrdering> uses_;
@@ -785,12 +797,14 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
SlotUseKind slot_use_kind() const { return HasSlotUseField::decode(bits_); }
// Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUsePosition(UsePosition* pos);
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone,
+ bool trace_alloc);
+ void AddUsePosition(UsePosition* pos, bool trace_alloc);
// Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
+ void ShortenTo(LifetimePosition start, bool trace_alloc);
// Detaches between start and end, and attributes the resulting range to
// result.
@@ -1279,11 +1293,13 @@ class LinearScanAllocator final : public RegisterAllocator {
RangeWithRegister::Equals>;
void MaybeUndoPreviousSplit(LiveRange* range);
- void SpillNotLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position, SpillMode spill_mode);
+ void SpillNotLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position, SpillMode spill_mode);
LiveRange* AssignRegisterOnReload(LiveRange* range, int reg);
- void ReloadLiveRanges(RangeWithRegisterSet& to_be_live,
- LifetimePosition position);
+ void ReloadLiveRanges(
+ RangeWithRegisterSet& to_be_live, // NOLINT(runtime/references)
+ LifetimePosition position);
void UpdateDeferredFixedRanges(SpillMode spill_mode, InstructionBlock* block);
bool BlockIsDeferredOrImmediatePredecessorIsNotDeferred(
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 595800268d..6457b7c8b4 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -73,6 +73,7 @@ class S390OperandConverter final : public InstructionOperandConverter {
case Constant::kDelayedStringConstant:
return Operand::EmbeddedStringConstant(
constant.ToDelayedStringConstant());
+ case Constant::kCompressedHeapObject:
case Constant::kHeapObject:
case Constant::kRpoNumber:
break;
@@ -1245,8 +1246,9 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, Instruction* instr,
+ S390OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
if (access_mode == kMemoryAccessPoisoned) {
@@ -1380,8 +1382,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!instr->InputAt(0)->IsImmediate());
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -1509,6 +1511,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
+ Label return_location;
+ // Put the return address in a stack slot.
+ if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
+ // Put the return address in a stack slot.
+ __ larl(r0, &return_location);
+ __ StoreP(r0, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
@@ -1516,6 +1525,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
+ __ bind(&return_location);
+ RecordSafepoint(instr->reference_map(), Safepoint::kNoLazyDeopt);
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
@@ -1547,22 +1558,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == r3);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
- __ stop("kArchDebugAbort");
+ __ stop();
break;
case kArchDebugBreak:
- __ stop("kArchDebugBreak");
+ __ stop();
break;
case kArchNop:
case kArchThrowTerminator:
@@ -2891,7 +2900,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
+ __ stop();
}
}
}
@@ -3014,8 +3023,14 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
if (call_descriptor->IsCFunctionCall()) {
- __ Push(r14, fp);
- __ LoadRR(fp, sp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ } else {
+ __ Push(r14, fp);
+ __ LoadRR(fp, sp);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
if (call_descriptor->PushArgumentCount()) {
@@ -3028,7 +3043,8 @@ void CodeGenerator::AssembleConstructFrame() {
__ StubPrologue(type);
if (call_descriptor->IsWasmFunctionCall()) {
__ Push(kWasmInstanceRegister);
- } else if (call_descriptor->IsWasmImportWrapper()) {
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
// WASM import wrappers are passed a tuple in the place of the instance.
// Unpack the tuple into the instance and the target callable.
// This must be done here in the codegen because it cannot be expressed
@@ -3038,12 +3054,16 @@ void CodeGenerator::AssembleConstructFrame() {
__ LoadP(kWasmInstanceRegister,
FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
__ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ lay(sp, MemOperand(sp, -kSystemPointerSize));
+ }
}
}
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
__ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
@@ -3089,7 +3109,7 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
+ __ stop();
}
__ bind(&done);
@@ -3247,6 +3267,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ break;
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
break;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index d982605efc..99d3b0fa0f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -447,11 +447,13 @@ void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
#endif
template <class CanCombineWithLoad>
-void GenerateRightOperands(InstructionSelector* selector, Node* node,
- Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateRightOperands(
+ InstructionSelector* selector, Node* node, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
if ((operand_mode & OperandMode::kAllowImmediate) &&
@@ -491,11 +493,13 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
}
template <class CanCombineWithLoad>
-void GenerateBinOpOperands(InstructionSelector* selector, Node* node,
- Node* left, Node* right, InstructionCode& opcode,
- OperandModes& operand_mode,
- InstructionOperand* inputs, size_t& input_count,
- CanCombineWithLoad canCombineWithLoad) {
+void GenerateBinOpOperands(
+ InstructionSelector* selector, Node* node, Node* left, Node* right,
+ InstructionCode& opcode, // NOLINT(runtime/references)
+ OperandModes& operand_mode, // NOLINT(runtime/references)
+ InstructionOperand* inputs,
+ size_t& input_count, // NOLINT(runtime/references)
+ CanCombineWithLoad canCombineWithLoad) {
S390OperandGenerator g(selector);
// left is always register
InstructionOperand const left_input = g.UseRegister(left);
@@ -686,9 +690,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
void InstructionSelector::VisitLoad(Node* node) {
@@ -2194,6 +2198,11 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ S390OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput());
+}
+
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
diff --git a/deps/v8/src/compiler/backend/unwinding-info-writer.h b/deps/v8/src/compiler/backend/unwinding-info-writer.h
index 590a839a06..d3a52b34b7 100644
--- a/deps/v8/src/compiler/backend/unwinding-info-writer.h
+++ b/deps/v8/src/compiler/backend/unwinding-info-writer.h
@@ -23,6 +23,7 @@ namespace v8 {
namespace internal {
class EhFrameWriter;
+class Zone;
namespace compiler {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index c6667292fc..a108edeff0 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -349,7 +349,8 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i, int pc) {
+ X64OperandConverter& i, // NOLINT(runtime/references)
+ int pc) {
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessProtected) {
@@ -357,9 +358,9 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter& i) {
+void EmitWordLoadPoisoningIfNeeded(
+ CodeGenerator* codegen, InstructionCode opcode, Instruction* instr,
+ X64OperandConverter& i) { // NOLINT(runtime/references)
const MemoryAccessMode access_mode =
static_cast<MemoryAccessMode>(MiscField::decode(opcode));
if (access_mode == kMemoryAccessPoisoned) {
@@ -575,6 +576,19 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \
} while (false)
+#define ASSEMBLE_SIMD_ALL_TRUE(opcode) \
+ do { \
+ CpuFeatureScope sse_scope(tasm(), SSE4_1); \
+ Register dst = i.OutputRegister(); \
+ Register tmp = i.TempRegister(0); \
+ __ movq(tmp, Immediate(1)); \
+ __ xorq(dst, dst); \
+ __ pxor(kScratchDoubleReg, kScratchDoubleReg); \
+ __ opcode(kScratchDoubleReg, i.InputSimd128Register(0)); \
+ __ ptest(kScratchDoubleReg, kScratchDoubleReg); \
+ __ cmovq(zero, dst, tmp); \
+ } while (false)
+
void CodeGenerator::AssembleDeconstructFrame() {
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
__ movq(rsp, rbp);
@@ -752,8 +766,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchCallBuiltinPointer: {
DCHECK(!HasImmediateInput(instr, 0));
- Register builtin_pointer = i.InputRegister(0);
- __ CallBuiltinPointer(builtin_pointer);
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
@@ -952,17 +966,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchDebugAbort:
+ case kArchAbortCSAAssert:
DCHECK(i.InputRegister(0) == rdx);
- if (!frame_access_state()->has_frame()) {
+ {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
- } else {
- __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
- RelocInfo::CODE_TARGET);
+ __ Call(
+ isolate()->builtins()->builtin_handle(Builtins::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
}
__ int3();
unwinding_info_writer_.MarkBlockWillExit();
@@ -1029,9 +1041,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
OutOfLineRecordWrite(this, object, operand, value, scratch0, scratch1,
mode, DetermineStubCallMode());
__ StoreTaggedField(operand, value);
- if (COMPRESS_POINTERS_BOOL) {
- __ DecompressTaggedPointer(object, object);
- }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1042,7 +1051,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
__ andq(i.InputRegister(0), kSpeculationPoisonRegister);
break;
- case kLFence:
+ case kX64MFence:
+ __ mfence();
+ break;
+ case kX64LFence:
__ lfence();
break;
case kArchStackSlot: {
@@ -1309,16 +1321,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 33);
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 33);
+ __ Andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 31);
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 31);
+ __ Xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Sqrt:
@@ -1517,18 +1529,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kX64F64x2Abs:
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psrlq(kScratchDoubleReg, 1);
- __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, 1);
+ __ Andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kX64F64x2Neg:
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ psllq(kScratchDoubleReg, 63);
- __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, 63);
+ __ Xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Sqrt:
@@ -1944,16 +1958,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64MovqCompressTagged: {
@@ -1970,16 +1987,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64DecompressSigned: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedSigned);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressPointer: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressTaggedPointer);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64DecompressAny: {
CHECK(instr->HasOutput());
ASSEMBLE_MOVX(DecompressAnyTagged);
+ EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64CompressSigned: // Fall through.
@@ -2006,11 +2026,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movss(operand, i.InputDoubleRegister(index));
+ __ Movss(operand, i.InputDoubleRegister(index));
}
break;
case kX64Movsd: {
@@ -2039,11 +2059,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CpuFeatureScope sse_scope(tasm(), SSSE3);
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, i, __ pc_offset());
if (instr->HasOutput()) {
- __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
- __ movdqu(operand, i.InputSimd128Register(index));
+ __ Movdqu(operand, i.InputSimd128Register(index));
}
break;
}
@@ -2065,7 +2085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsRegister()) {
__ Movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
- __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ __ Movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kX64BitcastLD:
@@ -2235,6 +2255,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64F64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ pshufd(dst, i.InputDoubleRegister(0), 0x44);
+ } else {
+ __ pshufd(dst, i.InputOperand(0), 0x44);
+ }
+ break;
+ }
+ case kX64F64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsFPRegister()) {
+ __ movq(kScratchRegister, i.InputDoubleRegister(2));
+ __ pinsrq(i.OutputSimd128Register(), kScratchRegister, i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64F64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movq(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltpd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F64x2Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmplepd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
// TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
case kX64F32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
@@ -2400,6 +2465,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I64x2Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(dst, i.InputRegister(0));
+ } else {
+ __ movq(dst, i.InputOperand(0));
+ }
+ __ pshufd(dst, dst, 0x44);
+ break;
+ }
+ case kX64I64x2ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pextrq(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrq(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrq(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I64x2Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+ if (dst == src) {
+ __ movapd(kScratchDoubleReg, src);
+ src = kScratchDoubleReg;
+ }
+ __ pxor(dst, dst);
+ __ psubq(dst, src);
+ break;
+ }
+ case kX64I64x2Shl: {
+ __ psllq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2ShrS: {
+ // TODO(zhin): there is vpsraq but requires AVX512
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // ShrS on each quadword one at a time
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(0);
+
+ // lower quadword
+ __ pextrq(kScratchRegister, src, 0x0);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x0);
+
+ // upper quadword
+ __ pextrq(kScratchRegister, src, 0x1);
+ __ sarq(kScratchRegister, Immediate(i.InputInt8(1)));
+ __ pinsrq(dst, kScratchRegister, 0x1);
+ break;
+ }
+ case kX64I64x2Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ paddq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ psubq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ XMMRegister left = i.InputSimd128Register(0);
+ XMMRegister right = i.InputSimd128Register(1);
+ XMMRegister tmp1 = i.ToSimd128Register(instr->TempAt(0));
+ XMMRegister tmp2 = i.ToSimd128Register(instr->TempAt(1));
+
+ __ movaps(tmp1, left);
+ __ movaps(tmp2, right);
+
+ // Multiply high dword of each qword of left with right.
+ __ psrlq(tmp1, 32);
+ __ pmuludq(tmp1, right);
+
+ // Multiply high dword of each qword of right with left.
+ __ psrlq(tmp2, 32);
+ __ pmuludq(tmp2, left);
+
+ __ paddq(tmp2, tmp1);
+ __ psllq(tmp2, 32);
+
+ __ pmuludq(left, right);
+ __ paddq(left, tmp2); // left == dst
+ break;
+ }
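      // Editor's illustration, not part of the commit: the sequence above is
      // schoolbook multiplication on 32-bit halves. With each 64-bit lane split
      // as a = a_hi * 2^32 + a_lo and b = b_hi * 2^32 + b_lo:
      //   a * b mod 2^64 = ((a_hi * b_lo + a_lo * b_hi) << 32) + a_lo * b_lo
      // tmp1 holds a_hi * b_lo, tmp2 holds a_lo * b_hi; their sum is shifted
      // left by 32 and added to the low product a_lo * b_lo in left (== dst).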
+ case kX64I64x2Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pcmpeqq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I64x2GtS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ __ pcmpgtq(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I64x2GeS: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ movaps(tmp, src);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
+ case kX64I64x2ShrU: {
+ __ psrlq(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I64x2GtU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pcmpgtq(dst, tmp);
+ break;
+ }
+ case kX64I64x2GeU: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_2);
+ XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src = i.InputSimd128Register(1);
+ XMMRegister tmp = i.ToSimd128Register(instr->TempAt(0));
+
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+
+ __ movaps(tmp, src);
+ __ pxor(dst, kScratchDoubleReg);
+ __ pxor(tmp, kScratchDoubleReg);
+ __ pcmpgtq(tmp, dst);
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, tmp);
+ break;
+ }
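      // Editor's illustration, not part of the commit: kX64I64x2GtU/GeU use the
      // standard sign-bias trick because SSE4.2 only provides a signed 64-bit
      // compare (pcmpgtq):  x >u y  <=>  (x ^ 2^63) >s (y ^ 2^63).
      // The pcmpeqd/psllq pair materializes the 0x8000000000000000 bias in each
      // lane, both operands are XORed with it, and GeU is then computed as the
      // complement of the swapped signed comparison.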
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (instr->InputAt(0)->IsRegister()) {
@@ -3297,6 +3527,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ por(dst, kScratchDoubleReg);
break;
}
+ case kX64S1x2AnyTrue:
case kX64S1x4AnyTrue:
case kX64S1x8AnyTrue:
case kX64S1x16AnyTrue: {
@@ -3310,19 +3541,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ cmovq(zero, dst, tmp);
break;
}
- case kX64S1x4AllTrue:
- case kX64S1x8AllTrue:
+ // Need to split up all the different lane structures because the
+ // comparison instruction used matters, e.g. given 0xff00, pcmpeqb returns
+ // 0x0011, pcmpeqw returns 0x0000, ptest will set ZF to 0 and 1
+ // respectively.
+ case kX64S1x2AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqq);
+ break;
+ }
+ case kX64S1x4AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqd);
+ break;
+ }
+ case kX64S1x8AllTrue: {
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqw);
+ break;
+ }
case kX64S1x16AllTrue: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- Register dst = i.OutputRegister();
- XMMRegister src = i.InputSimd128Register(0);
- Register tmp = i.TempRegister(0);
- __ movq(tmp, Immediate(1));
- __ xorq(dst, dst);
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ pxor(kScratchDoubleReg, src);
- __ ptest(kScratchDoubleReg, kScratchDoubleReg);
- __ cmovq(zero, dst, tmp);
+ ASSEMBLE_SIMD_ALL_TRUE(pcmpeqb);
break;
}
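      // Editor's illustration, not part of the commit: why the compare has to
      // match the lane width. Take an i16x8 lane holding 0x00ff (non-zero, so
      // AllTrue should treat it as true). pcmpeqw against zero yields a lane
      // mask of 0x0000, the scratch register stays zero, ptest sets ZF and the
      // result is 1. pcmpeqb against the same data sees the zero high byte,
      // writes 0xff into that byte of the scratch register, ptest clears ZF,
      // and AllTrue would wrongly report 0.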
case kX64StackCheck:
@@ -3507,6 +3743,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_INSTR
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
+#undef ASSEMBLE_SIMD_ALL_TRUE
namespace {
@@ -3734,6 +3971,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (call_descriptor->IsCFunctionCall()) {
__ pushq(rbp);
__ movq(rbp, rsp);
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)));
+ // Reserve stack space for saving the c_entry_fp later.
+ __ AllocateStackSpace(kSystemPointerSize);
+ }
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
if (call_descriptor->PushArgumentCount()) {
@@ -3765,8 +4007,8 @@ void CodeGenerator::AssembleConstructFrame() {
unwinding_info_writer_.MarkFrameConstructed(pc_base);
}
- int required_slots = frame()->GetTotalFrameSlotCount() -
- call_descriptor->CalculateFixedFrameSize();
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -3835,7 +4077,7 @@ void CodeGenerator::AssembleConstructFrame() {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(Operand(rsp, kQuadWordSize * slot_idx),
+ __ Movdqu(Operand(rsp, kQuadWordSize * slot_idx),
XMMRegister::from_code(i));
slot_idx++;
}
@@ -3877,7 +4119,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
int slot_idx = 0;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
if (!((1 << i) & saves_fp)) continue;
- __ movdqu(XMMRegister::from_code(i),
+ __ Movdqu(XMMRegister::from_code(i),
Operand(rsp, kQuadWordSize * slot_idx));
slot_idx++;
}
@@ -3970,6 +4212,16 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
break;
}
+ case Constant::kCompressedHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ Move(dst, src_object, RelocInfo::COMPRESSED_EMBEDDED_OBJECT);
+ }
+ break;
+ }
case Constant::kDelayedStringConstant: {
const StringConstantBase* src_constant = src.ToDelayedStringConstant();
__ MoveStringConstant(dst, src_constant);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 57ef26dbd7..d6ac3f43df 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -58,7 +58,8 @@ namespace compiler {
V(X64Popcnt32) \
V(X64Bswap) \
V(X64Bswap32) \
- V(LFence) \
+ V(X64MFence) \
+ V(X64LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -158,6 +159,15 @@ namespace compiler {
V(X64Poke) \
V(X64Peek) \
V(X64StackCheck) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
V(X64F32x4Splat) \
V(X64F32x4ExtractLane) \
V(X64F32x4ReplaceLane) \
@@ -177,6 +187,22 @@ namespace compiler {
V(X64F32x4Ne) \
V(X64F32x4Lt) \
V(X64F32x4Le) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2ReplaceLane) \
+ V(X64I64x2Neg) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2Ne) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2GtU) \
+ V(X64I64x2GeU) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
@@ -293,6 +319,8 @@ namespace compiler {
V(X64S8x8Reverse) \
V(X64S8x4Reverse) \
V(X64S8x2Reverse) \
+ V(X64S1x2AnyTrue) \
+ V(X64S1x2AllTrue) \
V(X64S1x4AnyTrue) \
V(X64S1x4AllTrue) \
V(X64S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 9d48e9175a..6389ef2e50 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -124,6 +124,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F64x2Splat:
+ case kX64F64x2ExtractLane:
+ case kX64F64x2ReplaceLane:
+ case kX64F64x2Abs:
+ case kX64F64x2Neg:
+ case kX64F64x2Eq:
+ case kX64F64x2Ne:
+ case kX64F64x2Lt:
+ case kX64F64x2Le:
case kX64F32x4Splat:
case kX64F32x4ExtractLane:
case kX64F32x4ReplaceLane:
@@ -143,6 +152,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64F32x4Ne:
case kX64F32x4Lt:
case kX64F32x4Le:
+ case kX64I64x2Splat:
+ case kX64I64x2ExtractLane:
+ case kX64I64x2ReplaceLane:
+ case kX64I64x2Neg:
+ case kX64I64x2Shl:
+ case kX64I64x2ShrS:
+ case kX64I64x2Add:
+ case kX64I64x2Sub:
+ case kX64I64x2Mul:
+ case kX64I64x2Eq:
+ case kX64I64x2Ne:
+ case kX64I64x2GtS:
+ case kX64I64x2GeS:
+ case kX64I64x2ShrU:
+ case kX64I64x2GtU:
+ case kX64I64x2GeU:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -233,6 +258,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64S128Not:
case kX64S128Select:
case kX64S128Zero:
+ case kX64S1x2AnyTrue:
+ case kX64S1x2AllTrue:
case kX64S1x4AnyTrue:
case kX64S1x4AllTrue:
case kX64S1x8AnyTrue:
@@ -327,7 +354,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Poke:
return kHasSideEffect;
- case kLFence:
+ case kX64MFence:
+ case kX64LFence:
return kHasSideEffect;
case kX64Word64AtomicLoadUint8:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index a20590b8d3..a4908fb846 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -309,21 +309,19 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitDebugAbort(Node* node) {
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
X64OperandGenerator g(this);
-
- ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= MiscField::encode(kMemoryAccessProtected);
@@ -334,6 +332,11 @@ void InstructionSelector::VisitLoad(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
+
void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
@@ -898,7 +901,8 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
// Omit truncation and turn subtractions of constant values into immediate
// "leal" instructions by negating the value.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), int64_input, g.TempImmediate(-imm));
+ g.DefineAsRegister(node), int64_input,
+ g.TempImmediate(base::NegateWithWraparound(imm)));
}
return;
}
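(Editor's note, not part of the commit.) base::NegateWithWraparound sidesteps the
undefined behaviour of negating kMinInt: for imm == -2^31 the wrapped negation is
again -2^31, and since leal keeps only the low 32 bits of the result this is still
correct, because x - (-2^31) == x + (-2^31) (mod 2^32).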
@@ -907,9 +911,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
if (m.left().Is(0)) {
Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
} else if (m.right().Is(0)) {
- // TODO(jarin): We should be able to use {EmitIdentity} here
- // (https://crbug.com/v8/7947).
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+ // {EmitIdentity} reuses the virtual register of the first input
+ // for the output. This is exactly what we want here.
+ EmitIdentity(node);
} else if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
// Turn subtractions of constant values into immediate "leal" instructions
// by negating the value.
@@ -1254,23 +1258,47 @@ void InstructionSelector::VisitChangeTaggedSignedToCompressedSigned(
}
void InstructionSelector::VisitChangeCompressedToTagged(Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressed);
+ VisitLoad(node, value, kX64MovqDecompressAnyTagged);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressAny, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedPointerToTaggedPointer(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedPointer);
+ VisitLoad(node, value, kX64MovqDecompressTaggedPointer);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressPointer, g.DefineAsRegister(node), g.Use(value));
+ }
}
void InstructionSelector::VisitChangeCompressedSignedToTaggedSigned(
Node* node) {
- X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kPoisonedLoad) &&
+ CanCover(node, value)) {
+ DCHECK_EQ(LoadRepresentationOf(value->op()).representation(),
+ MachineRepresentation::kCompressedSigned);
+ VisitLoad(node, value, kX64MovqDecompressTaggedSigned);
+ } else {
+ X64OperandGenerator g(this);
+ Emit(kX64DecompressSigned, g.DefineAsRegister(node), g.Use(value));
+ }
}
namespace {
@@ -2343,6 +2371,11 @@ void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64MFence, g.NoOutput());
+}
+
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
@@ -2545,12 +2578,18 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F64x2) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -2561,6 +2600,11 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2615,12 +2659,18 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Or) \
V(S128Xor)
+#define SIMD_BINOP_ONE_TEMP_LIST(V) \
+ V(I64x2GeS) \
+ V(I64x2GtU) \
+ V(I64x2GeU)
+
#define SIMD_UNOP_LIST(V) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
+ V(I64x2Neg) \
V(I32x4SConvertI16x8Low) \
V(I32x4SConvertI16x8High) \
V(I32x4Neg) \
@@ -2635,6 +2685,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Not)
#define SIMD_SHIFT_OPCODES(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4ShrU) \
@@ -2646,11 +2699,13 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrU)
#define SIMD_ANYTRUE_LIST(V) \
+ V(S1x2AnyTrue) \
V(S1x4AnyTrue) \
V(S1x8AnyTrue) \
V(S1x16AnyTrue)
#define SIMD_ALLTRUE_LIST(V) \
+ V(S1x2AllTrue) \
V(S1x4AllTrue) \
V(S1x8AllTrue) \
V(S1x16AllTrue)
@@ -2721,6 +2776,18 @@ SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
#undef SIMD_BINOP_LIST
+#define VISIT_SIMD_BINOP_ONE_TEMP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ arraysize(temps), temps); \
+ }
+SIMD_BINOP_ONE_TEMP_LIST(VISIT_SIMD_BINOP_ONE_TEMP)
+#undef VISIT_SIMD_BINOP_ONE_TEMP
+#undef SIMD_BINOP_ONE_TEMP_LIST
+
#define VISIT_SIMD_ANYTRUE(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2751,12 +2818,33 @@ void InstructionSelector::VisitS128Select(Node* node) {
g.UseRegister(node->InputAt(2)));
}
+void InstructionSelector::VisitF64x2Abs(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitF64x2Neg(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64F32x4UConvertI32x4, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitI64x2Mul(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register(),
+ g.TempSimd128Register()};
+ Emit(kX64I64x2Mul, g.DefineSameAsFirst(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 9c23cd460a..b44bec5fc8 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -79,22 +79,28 @@ ResumeJumpTarget ResumeJumpTarget::AtLoopHeader(int loop_header_offset,
}
BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
- Zone* zone, bool do_liveness_analysis)
+ Zone* zone, BailoutId osr_bailout_id,
+ bool analyze_liveness)
: bytecode_array_(bytecode_array),
- do_liveness_analysis_(do_liveness_analysis),
zone_(zone),
+ osr_bailout_id_(osr_bailout_id),
+ analyze_liveness_(analyze_liveness),
loop_stack_(zone),
loop_end_index_queue_(zone),
resume_jump_targets_(zone),
end_to_header_(zone),
header_to_info_(zone),
osr_entry_point_(-1),
- liveness_map_(bytecode_array->length(), zone) {}
+ liveness_map_(bytecode_array->length(), zone) {
+ Analyze();
+}
namespace {
-void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateInLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& in_liveness, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -201,12 +207,14 @@ void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
}
}
-void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
- BytecodeLivenessState* next_bytecode_in_liveness,
- const interpreter::BytecodeArrayAccessor& accessor,
- const BytecodeLivenessMap& liveness_map) {
+void UpdateOutLiveness(
+ Bytecode bytecode,
+ BytecodeLivenessState& out_liveness, // NOLINT(runtime/references)
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
+ const BytecodeLivenessMap& liveness_map) {
int current_offset = accessor.current_offset();
- const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
// Special case Suspend and Resume to just pass through liveness.
if (bytecode == Bytecode::kSuspendGenerator ||
@@ -261,20 +269,24 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
}
}
-void UpdateLiveness(Bytecode bytecode, BytecodeLiveness& liveness,
+void UpdateLiveness(Bytecode bytecode,
+ BytecodeLiveness& liveness, // NOLINT(runtime/references)
BytecodeLivenessState** next_bytecode_in_liveness,
const interpreter::BytecodeArrayAccessor& accessor,
+ Handle<BytecodeArray> bytecode_array,
const BytecodeLivenessMap& liveness_map) {
UpdateOutLiveness(bytecode, *liveness.out, *next_bytecode_in_liveness,
- accessor, liveness_map);
+ accessor, bytecode_array, liveness_map);
liveness.in->CopyFrom(*liveness.out);
UpdateInLiveness(bytecode, *liveness.in, accessor);
*next_bytecode_in_liveness = liveness.in;
}
-void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
- const interpreter::BytecodeArrayAccessor& accessor) {
+void UpdateAssignments(
+ Bytecode bytecode,
+ BytecodeLoopAssignments& assignments, // NOLINT(runtime/references)
+ const interpreter::BytecodeArrayAccessor& accessor) {
int num_operands = Bytecodes::NumberOfOperands(bytecode);
const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
@@ -307,15 +319,13 @@ void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
} // namespace
-void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+void BytecodeAnalysis::Analyze() {
loop_stack_.push({-1, nullptr});
BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
-
- bool is_osr = !osr_bailout_id.IsNone();
- int osr_loop_end_offset = is_osr ? osr_bailout_id.ToInt() : -1;
-
int generator_switch_index = -1;
+ int osr_loop_end_offset = osr_bailout_id_.ToInt();
+ DCHECK_EQ(osr_loop_end_offset < 0, osr_bailout_id_.IsNone());
interpreter::BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
@@ -337,14 +347,14 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
if (current_offset == osr_loop_end_offset) {
osr_entry_point_ = loop_header;
} else if (current_offset < osr_loop_end_offset) {
- // Check we've found the osr_entry_point if we've gone past the
+ // Assert that we've found the osr_entry_point if we've gone past the
// osr_loop_end_offset. Note, we are iterating the bytecode in reverse,
- // so the less than in the check is correct.
- DCHECK_NE(-1, osr_entry_point_);
+ // so the less-than in the above condition is correct.
+ DCHECK_LE(0, osr_entry_point_);
}
// Save the index so that we can do another pass later.
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
loop_end_index_queue_.push_back(iterator.current_index());
}
} else if (loop_stack_.size() > 1) {
@@ -357,8 +367,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// information we currently have.
UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
- // Update suspend counts for this loop, though only if not OSR.
- if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ // Update suspend counts for this loop.
+ if (bytecode == Bytecode::kSuspendGenerator) {
int suspend_id = iterator.GetUnsignedImmediateOperand(3);
int resume_offset = current_offset + iterator.current_bytecode_size();
current_loop_info->AddResumeTarget(
@@ -412,7 +422,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
}
}
}
- } else if (!is_osr && bytecode == Bytecode::kSuspendGenerator) {
+ } else if (bytecode == Bytecode::kSuspendGenerator) {
// If we're not in a loop, we still need to look for suspends.
// TODO(leszeks): It would be nice to de-duplicate this with the in-loop
// case
@@ -422,11 +432,11 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
ResumeJumpTarget::Leaf(suspend_id, resume_offset));
}
- if (do_liveness_analysis_) {
+ if (analyze_liveness_) {
BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
current_offset, bytecode_array()->register_count(), zone());
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
@@ -435,7 +445,7 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK(ResumeJumpTargetsAreValid());
- if (!do_liveness_analysis_) return;
+ if (!analyze_liveness_) return;
// At this point, every bytecode has a valid in and out liveness, except for
// propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
@@ -489,12 +499,13 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
// Now we are at the loop header. Since the in-liveness of the header
// can't change, we need only to update the out-liveness.
UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
- next_bytecode_in_liveness, iterator, liveness_map_);
+ next_bytecode_in_liveness, iterator, bytecode_array(),
+ liveness_map_);
}
// Process the generator switch statement separately, once the loops are done.
@@ -533,12 +544,12 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK_NE(bytecode, Bytecode::kJumpLoop);
UpdateLiveness(bytecode, liveness, &next_bytecode_in_liveness, iterator,
- liveness_map_);
+ bytecode_array(), liveness_map_);
}
}
}
- DCHECK(do_liveness_analysis_);
+ DCHECK(analyze_liveness_);
if (FLAG_trace_environment_liveness) {
StdoutStream of;
PrintLivenessTo(of);
@@ -610,14 +621,14 @@ const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetInLiveness(offset);
}
const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
int offset) const {
- if (!do_liveness_analysis_) return nullptr;
+ if (!analyze_liveness_) return nullptr;
return liveness_map_.GetOutLiveness(offset);
}
@@ -662,9 +673,8 @@ bool BytecodeAnalysis::ResumeJumpTargetsAreValid() {
}
// If the iterator is invalid, we've reached the end without finding the
- // generator switch. Similarly, if we are OSR-ing, we're not resuming, so we
- // need no jump targets. So, ensure there are no jump targets and exit.
- if (!iterator.IsValid() || HasOsrEntryPoint()) {
+ // generator switch. So, ensure there are no jump targets and exit.
+ if (!iterator.IsValid()) {
// Check top-level.
if (!resume_jump_targets().empty()) {
PrintF(stderr,
@@ -758,14 +768,14 @@ bool BytecodeAnalysis::ResumeJumpTargetLeavesResolveSuspendIds(
valid = false;
} else {
// Make sure we're resuming to a Resume bytecode
- interpreter::BytecodeArrayAccessor assessor(bytecode_array(),
+ interpreter::BytecodeArrayAccessor accessor(bytecode_array(),
target.target_offset());
- if (assessor.current_bytecode() != Bytecode::kResumeGenerator) {
+ if (accessor.current_bytecode() != Bytecode::kResumeGenerator) {
PrintF(stderr,
"Expected resume target for id %d, offset %d, to be "
"ResumeGenerator, but found %s\n",
target.suspend_id(), target.target_offset(),
- Bytecodes::ToString(assessor.current_bytecode()));
+ Bytecodes::ToString(accessor.current_bytecode()));
valid = false;
}
@@ -820,7 +830,7 @@ bool BytecodeAnalysis::LivenessIsValid() {
previous_liveness.CopyFrom(*liveness.out);
UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
- iterator, liveness_map_);
+ iterator, bytecode_array(), liveness_map_);
// UpdateOutLiveness skips kJumpLoop, so we update it manually.
if (bytecode == Bytecode::kJumpLoop) {
int target_offset = iterator.GetJumpTargetOffset();
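Taken together, these hunks make BytecodeAnalysis self-contained: the constructor now receives the OSR bailout id and the liveness flag and runs Analyze() itself, and UpdateOutLiveness/UpdateLiveness take the bytecode array explicitly instead of pulling it off the accessor. A sketch of the resulting call-site shape (illustrative only; the locals below are hypothetical, and the real callers are updated elsewhere in this patch):

// Sketch, not from the patch: construction now implies analysis.
BytecodeAnalysis analysis(bytecode_array, zone, osr_bailout_id,
                          analyze_liveness);
// Results are valid immediately -- there is no separate
// analysis.Analyze(osr_bailout_id) call as before.
bool is_header = analysis.IsLoopHeader(offset);
const BytecodeLivenessState* in_liveness = analysis.GetInLivenessFor(offset);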
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index 53f86ca306..32c5168466 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -92,18 +92,14 @@ struct V8_EXPORT_PRIVATE LoopInfo {
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
};
-class V8_EXPORT_PRIVATE BytecodeAnalysis {
+// Analyze the bytecodes to find the loop ranges, loop nesting, loop assignments
+// and liveness. NOTE: The broker/serializer relies on the fact that an
+// analysis for OSR (osr_bailout_id is not None) subsumes an analysis for
+// non-OSR (osr_bailout_id is None).
+class V8_EXPORT_PRIVATE BytecodeAnalysis : public ZoneObject {
public:
BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
- bool do_liveness_analysis);
-
- // Analyze the bytecodes to find the loop ranges, loop nesting, loop
- // assignments and liveness, under the assumption that there is an OSR bailout
- // at {osr_bailout_id}.
- //
- // No other methods in this class return valid information until this has been
- // called.
- void Analyze(BailoutId osr_bailout_id);
+ BailoutId osr_bailout_id, bool analyze_liveness);
// Return true if the given offset is a loop header
bool IsLoopHeader(int offset) const;
@@ -118,23 +114,30 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
return resume_jump_targets_;
}
- // True if the current analysis has an OSR entry point.
- bool HasOsrEntryPoint() const { return osr_entry_point_ != -1; }
-
- int osr_entry_point() const { return osr_entry_point_; }
-
- // Gets the in-liveness for the bytecode at {offset}.
+ // Gets the in-/out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
-
- // Gets the out-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+ // In the case of OSR, the analysis also computes the (bytecode offset of the)
+ // OSR entry point from the {osr_bailout_id} that was given to the
+ // constructor.
+ int osr_entry_point() const {
+ CHECK_LE(0, osr_entry_point_);
+ return osr_entry_point_;
+ }
+ // Return the osr_bailout_id (for verification purposes).
+ BailoutId osr_bailout_id() const { return osr_bailout_id_; }
+
+ // Return whether liveness analysis was performed (for verification purposes).
+ bool liveness_analyzed() const { return analyze_liveness_; }
+
private:
struct LoopStackEntry {
int header_offset;
LoopInfo* loop_info;
};
+ void Analyze();
void PushLoop(int loop_header, int loop_end);
#if DEBUG
@@ -153,17 +156,15 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis {
std::ostream& PrintLivenessTo(std::ostream& os) const;
Handle<BytecodeArray> const bytecode_array_;
- bool const do_liveness_analysis_;
Zone* const zone_;
-
+ BailoutId const osr_bailout_id_;
+ bool const analyze_liveness_;
ZoneStack<LoopStackEntry> loop_stack_;
ZoneVector<int> loop_end_index_queue_;
ZoneVector<ResumeJumpTarget> resume_jump_targets_;
-
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
int osr_entry_point_;
-
BytecodeLivenessMap liveness_map_;
DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
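One behavioral detail of the new interface: HasOsrEntryPoint() is gone, and osr_entry_point() now CHECK-fails unless an OSR entry was actually computed, so callers are expected to consult the bailout id (or their own OSR flag) before asking. A hedged sketch of the caller discipline this implies (hypothetical variable names):

// Sketch only: guard the accessor with the bailout id rather than the removed
// HasOsrEntryPoint() predicate.
if (!analysis.osr_bailout_id().IsNone()) {
  int osr_entry = analysis.osr_entry_point();  // CHECK_LE(0, ...) holds here
  // ... peel loops up to osr_entry, as the graph builder does ...
}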
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 0ab8f85670..7c71446320 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -6,9 +6,11 @@
#include "src/ast/ast.h"
#include "src/codegen/source-position-table.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -32,14 +34,15 @@ namespace compiler {
class BytecodeGraphBuilder {
public:
BytecodeGraphBuilder(JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared,
- Handle<FeedbackVector> feedback_vector,
- BailoutId osr_offset, JSGraph* jsgraph,
+ BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared,
+ FeedbackVectorRef feedback_vector, BailoutId osr_offset,
+ JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ NativeContextRef native_context, int inlining_id,
+ BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
// Creates a graph by visiting bytecodes.
void CreateGraph();
@@ -318,12 +321,8 @@ class BytecodeGraphBuilder {
return jsgraph_->simplified();
}
Zone* local_zone() const { return local_zone_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
- const Handle<FeedbackVector>& feedback_vector() const {
- return feedback_vector_;
- }
+ const BytecodeArrayRef bytecode_array() const { return bytecode_array_; }
+ FeedbackVectorRef feedback_vector() const { return feedback_vector_; }
const JSTypeHintLowering& type_hint_lowering() const {
return type_hint_lowering_;
}
@@ -332,7 +331,7 @@ class BytecodeGraphBuilder {
}
SourcePositionTableIterator& source_position_iterator() {
- return source_position_iterator_;
+ return *source_position_iterator_.get();
}
interpreter::BytecodeArrayIterator& bytecode_iterator() {
@@ -343,8 +342,6 @@ class BytecodeGraphBuilder {
return bytecode_analysis_;
}
- void RunBytecodeAnalysis() { bytecode_analysis_.Analyze(osr_offset_); }
-
int currently_peeled_loop_offset() const {
return currently_peeled_loop_offset_;
}
@@ -368,9 +365,9 @@ class BytecodeGraphBuilder {
needs_eager_checkpoint_ = value;
}
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ SharedFunctionInfoRef shared_info() const { return shared_info_; }
- Handle<Context> native_context() const { return native_context_; }
+ NativeContextRef native_context() const { return native_context_; }
JSHeapBroker* broker() const { return broker_; }
@@ -382,15 +379,15 @@ class BytecodeGraphBuilder {
Zone* const local_zone_;
JSGraph* const jsgraph_;
CallFrequency const invocation_frequency_;
- Handle<BytecodeArray> const bytecode_array_;
- Handle<FeedbackVector> const feedback_vector_;
+ BytecodeArrayRef const bytecode_array_;
+ FeedbackVectorRef feedback_vector_;
JSTypeHintLowering const type_hint_lowering_;
const FrameStateFunctionInfo* const frame_state_function_info_;
- SourcePositionTableIterator source_position_iterator_;
+ std::unique_ptr<SourcePositionTableIterator> source_position_iterator_;
interpreter::BytecodeArrayIterator bytecode_iterator_;
- BytecodeAnalysis bytecode_analysis_;
+ BytecodeAnalysis const& bytecode_analysis_;
Environment* environment_;
- BailoutId const osr_offset_;
+ bool const osr_;
int currently_peeled_loop_offset_;
bool skip_next_stack_check_;
@@ -434,10 +431,12 @@ class BytecodeGraphBuilder {
SourcePosition const start_position_;
- Handle<SharedFunctionInfo> const shared_info_;
+ SharedFunctionInfoRef const shared_info_;
// The native context for which we optimize.
- Handle<Context> const native_context_;
+ NativeContextRef const native_context_;
+
+ TickCounter* const tick_counter_;
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
@@ -938,13 +937,12 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- JSHeapBroker* broker, Zone* local_zone,
- Handle<BytecodeArray> bytecode_array,
- Handle<SharedFunctionInfo> shared_info,
- Handle<FeedbackVector> feedback_vector, BailoutId osr_offset,
- JSGraph* jsgraph, CallFrequency const& invocation_frequency,
- SourcePositionTable* source_positions, Handle<Context> native_context,
- int inlining_id, BytecodeGraphBuilderFlags flags)
+ JSHeapBroker* broker, Zone* local_zone, BytecodeArrayRef bytecode_array,
+ SharedFunctionInfoRef shared_info, FeedbackVectorRef feedback_vector,
+ BailoutId osr_offset, JSGraph* jsgraph,
+ CallFrequency const& invocation_frequency,
+ SourcePositionTable* source_positions, NativeContextRef native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
: broker_(broker),
local_zone_(local_zone),
jsgraph_(jsgraph),
@@ -952,22 +950,22 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array_(bytecode_array),
feedback_vector_(feedback_vector),
type_hint_lowering_(
- jsgraph, feedback_vector,
+ jsgraph, feedback_vector.object(),
(flags & BytecodeGraphBuilderFlag::kBailoutOnUninitialized)
? JSTypeHintLowering::kBailoutOnUninitialized
: JSTypeHintLowering::kNoFlags),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
- bytecode_array->parameter_count(), bytecode_array->register_count(),
- shared_info)),
- source_position_iterator_(
- handle(bytecode_array->SourcePositionTableIfCollected(), isolate())),
- bytecode_iterator_(bytecode_array),
- bytecode_analysis_(
- bytecode_array, local_zone,
- flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness),
+ bytecode_array.parameter_count(), bytecode_array.register_count(),
+ shared_info.object())),
+ bytecode_iterator_(
+ base::make_unique<OffHeapBytecodeArray>(bytecode_array)),
+ bytecode_analysis_(broker_->GetBytecodeAnalysis(
+ bytecode_array.object(), osr_offset,
+ flags & BytecodeGraphBuilderFlag::kAnalyzeEnvironmentLiveness,
+ !FLAG_concurrent_inlining)),
environment_(nullptr),
- osr_offset_(osr_offset),
+ osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
skip_next_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
@@ -981,9 +979,23 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
exit_controls_(local_zone),
state_values_cache_(jsgraph),
source_positions_(source_positions),
- start_position_(shared_info->StartPosition(), inlining_id),
+ start_position_(shared_info.StartPosition(), inlining_id),
shared_info_(shared_info),
- native_context_(native_context) {}
+ native_context_(native_context),
+ tick_counter_(tick_counter) {
+ if (FLAG_concurrent_inlining) {
+ // With concurrent inlining on, the source position address doesn't change
+ // because it's been copied from the heap.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ Vector<const byte>(bytecode_array.source_positions_address(),
+ bytecode_array.source_positions_size()));
+ } else {
+ // Otherwise, we need to access the table through a handle.
+ source_position_iterator_ = base::make_unique<SourcePositionTableIterator>(
+ handle(bytecode_array.object()->SourcePositionTableIfCollected(),
+ isolate()));
+ }
+}
Node* BytecodeGraphBuilder::GetFunctionClosure() {
if (!function_closure_.is_set()) {
@@ -997,33 +1009,30 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
- NodeProperties::ReplaceContextInput(
- result, jsgraph()->HeapConstant(native_context()));
+ NodeProperties::ReplaceContextInput(result,
+ jsgraph()->Constant(native_context()));
return result;
}
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
- FeedbackNexus nexus(feedback_vector(), slot);
- return VectorSlotPair(feedback_vector(), slot, nexus.ic_state());
+ FeedbackNexus nexus(feedback_vector().object(), slot);
+ return VectorSlotPair(feedback_vector().object(), slot, nexus.ic_state());
}
void BytecodeGraphBuilder::CreateGraph() {
- BytecodeArrayRef bytecode_array_ref(broker(), bytecode_array());
-
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
- int actual_parameter_count = bytecode_array_ref.parameter_count() + 4;
+ int actual_parameter_count = bytecode_array().parameter_count() + 4;
graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
- Environment env(
- this, bytecode_array_ref.register_count(),
- bytecode_array_ref.parameter_count(),
- bytecode_array_ref.incoming_new_target_or_generator_register(),
- graph()->start());
+ Environment env(this, bytecode_array().register_count(),
+ bytecode_array().parameter_count(),
+ bytecode_array().incoming_new_target_or_generator_register(),
+ graph()->start());
set_environment(&env);
VisitBytecodes();
@@ -1112,19 +1121,17 @@ class BytecodeGraphBuilder::OsrIteratorState {
void ProcessOsrPrelude() {
ZoneVector<int> outer_loop_offsets(graph_builder_->local_zone());
- BytecodeAnalysis const& bytecode_analysis =
- graph_builder_->bytecode_analysis();
- int osr_offset = bytecode_analysis.osr_entry_point();
+ int osr_entry = graph_builder_->bytecode_analysis().osr_entry_point();
// We find here the outermost loop which contains the OSR loop.
- int outermost_loop_offset = osr_offset;
- while ((outermost_loop_offset =
- bytecode_analysis.GetLoopInfoFor(outermost_loop_offset)
- .parent_offset()) != -1) {
+ int outermost_loop_offset = osr_entry;
+ while ((outermost_loop_offset = graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(outermost_loop_offset)
+ .parent_offset()) != -1) {
outer_loop_offsets.push_back(outermost_loop_offset);
}
outermost_loop_offset =
- outer_loop_offsets.empty() ? osr_offset : outer_loop_offsets.back();
+ outer_loop_offsets.empty() ? osr_entry : outer_loop_offsets.back();
graph_builder_->AdvanceIteratorsTo(outermost_loop_offset);
// We save some iterators states at the offsets of the loop headers of the
@@ -1142,14 +1149,16 @@ class BytecodeGraphBuilder::OsrIteratorState {
}
// Finishing by advancing to the OSR entry
- graph_builder_->AdvanceIteratorsTo(osr_offset);
+ graph_builder_->AdvanceIteratorsTo(osr_entry);
// Enters all remaining exception handler which end before the OSR loop
// so that on next call of VisitSingleBytecode they will get popped from
// the exception handlers stack.
- graph_builder_->ExitThenEnterExceptionHandlers(osr_offset);
+ graph_builder_->ExitThenEnterExceptionHandlers(osr_entry);
graph_builder_->set_currently_peeled_loop_offset(
- bytecode_analysis.GetLoopInfoFor(osr_offset).parent_offset());
+ graph_builder_->bytecode_analysis()
+ .GetLoopInfoFor(osr_entry)
+ .parent_offset());
}
void RestoreState(int target_offset, int new_parent_offset) {
@@ -1198,8 +1207,8 @@ void BytecodeGraphBuilder::RemoveMergeEnvironmentsBeforeOffset(
void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
OsrIteratorState iterator_states(this);
iterator_states.ProcessOsrPrelude();
- int osr_offset = bytecode_analysis().osr_entry_point();
- DCHECK_EQ(bytecode_iterator().current_offset(), osr_offset);
+ int osr_entry = bytecode_analysis().osr_entry_point();
+ DCHECK_EQ(bytecode_iterator().current_offset(), osr_entry);
environment()->FillWithOsrValues();
@@ -1217,7 +1226,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
// parent loop entirely, and so on.
int current_parent_offset =
- bytecode_analysis().GetLoopInfoFor(osr_offset).parent_offset();
+ bytecode_analysis().GetLoopInfoFor(osr_entry).parent_offset();
while (current_parent_offset != -1) {
const LoopInfo& current_parent_loop =
bytecode_analysis().GetLoopInfoFor(current_parent_offset);
@@ -1261,6 +1270,7 @@ void BytecodeGraphBuilder::AdvanceToOsrEntryAndPeelLoops() {
}
void BytecodeGraphBuilder::VisitSingleBytecode() {
+ tick_counter_->DoTick();
int current_offset = bytecode_iterator().current_offset();
UpdateSourcePosition(current_offset);
ExitThenEnterExceptionHandlers(current_offset);
@@ -1289,14 +1299,12 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
}
void BytecodeGraphBuilder::VisitBytecodes() {
- RunBytecodeAnalysis();
-
if (!bytecode_analysis().resume_jump_targets().empty()) {
environment()->BindGeneratorState(
jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
}
- if (bytecode_analysis().HasOsrEntryPoint()) {
+ if (osr_) {
// We peel the OSR loop and any outer loop containing it except that we
// leave the nodes corresponding to the whole outermost loop (including
// the last copies of the loops it contains) to be generated by the normal
@@ -1333,7 +1341,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
void BytecodeGraphBuilder::VisitLdaConstant() {
Node* node = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
environment()->BindAccumulator(node);
}
@@ -1383,15 +1391,16 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
- DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
+ DCHECK(
+ IsLoadGlobalICKind(feedback_vector().object()->GetKind(feedback.slot())));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
return NewNode(op);
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
@@ -1400,8 +1409,8 @@ void BytecodeGraphBuilder::VisitLdaGlobal() {
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
@@ -1410,8 +1419,8 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1537,7 +1546,7 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
@@ -1622,7 +1631,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1657,9 +1666,8 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1675,7 +1683,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op =
javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
@@ -1705,7 +1713,7 @@ void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -1729,8 +1737,8 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
@@ -1753,8 +1761,8 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->LoadNamed(name, VectorSlotPair());
Node* node = NewNode(op, object);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1788,8 +1796,8 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
VectorSlotPair feedback =
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
@@ -1828,8 +1836,8 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<Name> name(
- Name::cast(bytecode_iterator().GetConstantForIndexOperand(1)), isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -1902,10 +1910,8 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- Handle<SharedFunctionInfo> shared_info(
- SharedFunctionInfo::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -1913,7 +1919,7 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
: AllocationType::kYoung;
const Operator* op = javascript()->CreateClosure(
shared_info,
- feedback_vector()->GetClosureFeedbackCell(
+ feedback_vector().object()->GetClosureFeedbackCell(
bytecode_iterator().GetIndexOperand(1)),
handle(jsgraph()->isolate()->builtins()->builtin(Builtins::kCompileLazy),
isolate()),
@@ -1923,9 +1929,8 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* context = NewNode(op);
@@ -1933,9 +1938,8 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, FUNCTION_SCOPE);
@@ -1944,9 +1948,8 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op =
javascript()->CreateFunctionContext(scope_info, slots, EVAL_SCOPE);
@@ -1957,9 +1960,8 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateCatchContext(scope_info);
Node* context = NewNode(op, exception);
@@ -1969,9 +1971,8 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Handle<ScopeInfo> scope_info(
- ScopeInfo::cast(bytecode_iterator().GetConstantForIndexOperand(1)),
- isolate());
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->CreateWithContext(scope_info);
Node* context = NewNode(op, object);
@@ -1997,9 +1998,8 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- Handle<String> constant_pattern(
- String::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<String> constant_pattern = Handle<String>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2009,10 +2009,9 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<ArrayBoilerplateDescription> array_boilerplate_description(
- ArrayBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ArrayBoilerplateDescription> array_boilerplate_description =
+ Handle<ArrayBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2046,10 +2045,9 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- Handle<ObjectBoilerplateDescription> constant_properties(
- ObjectBoilerplateDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ Handle<ObjectBoilerplateDescription> constant_properties =
+ Handle<ObjectBoilerplateDescription>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
int const slot_id = bytecode_iterator().GetIndexOperand(1);
VectorSlotPair pair = CreateVectorSlotPair(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2082,29 +2080,13 @@ void BytecodeGraphBuilder::VisitCloneObject() {
}
void BytecodeGraphBuilder::VisitGetTemplateObject() {
- Handle<TemplateObjectDescription> description(
- TemplateObjectDescription::cast(
- bytecode_iterator().GetConstantForIndexOperand(0)),
- isolate());
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
-
- Handle<JSArray> cached_value;
- if (nexus.GetFeedback() == MaybeObject::FromSmi(Smi::zero())) {
- // It's not observable when the template object is created, so we
- // can just create it eagerly during graph building and bake in
- // the JSArray constant here.
- cached_value = TemplateObjectDescription::GetTemplateObject(
- isolate(), native_context(), description, shared_info(), slot.ToInt());
- nexus.vector().Set(slot, *cached_value);
- } else {
- cached_value =
- handle(JSArray::cast(nexus.GetFeedback()->GetHeapObjectAssumeStrong()),
- isolate());
- }
-
- Node* template_object = jsgraph()->HeapConstant(cached_value);
- environment()->BindAccumulator(template_object);
+ ObjectRef description(
+ broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
+ JSArrayRef template_object =
+ shared_info().GetTemplateObject(description, feedback_vector(), slot);
+ environment()->BindAccumulator(jsgraph()->Constant(template_object));
}
Node* const* BytecodeGraphBuilder::GetCallArgumentsFromRegisters(
@@ -2587,7 +2569,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
Node* name = jsgraph()->Constant(
- handle(bytecode_iterator().GetConstantForIndexOperand(0), isolate()));
+ bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -2658,7 +2640,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetBinaryOperationFeedback();
}
@@ -2666,14 +2648,14 @@ BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
// feedback.
CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(1);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
return nexus.GetCompareOperationFeedback();
}
// Helper function to create for-in mode from the recorded type feedback.
ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
FeedbackSlot slot = bytecode_iterator().GetSlotOperand(operand_index);
- FeedbackNexus nexus(feedback_vector(), slot);
+ FeedbackNexus nexus(feedback_vector().object(), slot);
switch (nexus.GetForInFeedback()) {
case ForInHint::kNone:
case ForInHint::kEnumCacheKeysAndIndices:
@@ -2688,7 +2670,8 @@ ForInMode BytecodeGraphBuilder::GetForInMode(int operand_index) {
CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
if (invocation_frequency_.IsUnknown()) return CallFrequency();
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
float feedback_frequency = nexus.ComputeCallFrequency();
if (feedback_frequency == 0.0f) {
// This is to prevent multiplying zero and infinity.
@@ -2699,7 +2682,8 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
}
SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
- FeedbackNexus nexus(feedback_vector(), FeedbackVector::ToSlot(slot_id));
+ FeedbackNexus nexus(feedback_vector().object(),
+ FeedbackVector::ToSlot(slot_id));
return nexus.GetSpeculationMode();
}
@@ -3301,8 +3285,7 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
CHECK_EQ(0, first_reg.index());
int register_count =
static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
Node* suspend_id = jsgraph()->SmiConstant(
bytecode_iterator().GetUnsignedImmediateOperand(3));
@@ -3442,8 +3425,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
const BytecodeLivenessState* liveness = bytecode_analysis().GetOutLivenessFor(
bytecode_iterator().current_offset());
- int parameter_count_without_receiver =
- bytecode_array()->parameter_count() - 1;
+ int parameter_count_without_receiver = bytecode_array().parameter_count() - 1;
// Mapping between registers and array indices must match that used in
// InterpreterAssembler::ExportParametersAndRegisterFile.
@@ -3836,7 +3818,10 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
}
void BytecodeGraphBuilder::ExitThenEnterExceptionHandlers(int current_offset) {
- HandlerTable table(*bytecode_array());
+ DisallowHeapAllocation no_allocation;
+ HandlerTable table(bytecode_array().handler_table_address(),
+ bytecode_array().handler_table_size(),
+ HandlerTable::kRangeBasedEncoding);
// Potentially exit exception handlers.
while (!exception_handlers_.empty()) {
@@ -3890,7 +3875,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = OperatorProperties::NeedsExactContext(op)
? environment()->Context()
- : jsgraph()->HeapConstant(native_context());
+ : jsgraph()->Constant(native_context());
}
if (has_frame_state) {
// The frame state will be inserted later. Here we misuse the {Dead} node
@@ -4037,12 +4022,19 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags) {
- BytecodeGraphBuilder builder(broker, local_zone, bytecode_array, shared,
- feedback_vector, osr_offset, jsgraph,
- invocation_frequency, source_positions,
- native_context, inlining_id, flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter) {
+ BytecodeArrayRef bytecode_array_ref(broker, bytecode_array);
+ DCHECK(bytecode_array_ref.IsSerializedForCompilation());
+ FeedbackVectorRef feedback_vector_ref(broker, feedback_vector);
+ SharedFunctionInfoRef shared_ref(broker, shared);
+ DCHECK(shared_ref.IsSerializedForCompilation(feedback_vector_ref));
+ NativeContextRef native_context_ref(broker, native_context);
+ BytecodeGraphBuilder builder(
+ broker, local_zone, bytecode_array_ref, shared_ref, feedback_vector_ref,
+ osr_offset, jsgraph, invocation_frequency, source_positions,
+ native_context_ref, inlining_id, flags, tick_counter);
builder.CreateGraph();
}
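Two patterns recur throughout this file. First, GetConstantForIndexOperand now takes the isolate and returns a Handle directly, so the old cast-then-rehandle dance collapses at every call site; shown once generically (sketch only, mirroring the hunks above):

// Before: wrap a raw Object into a Handle at each call site.
//   Handle<Name> name(
//       Name::cast(bytecode_iterator().GetConstantForIndexOperand(0)),
//       isolate());
// After: the accessor already hands back a Handle, so call sites just cast.
Handle<Name> name = Handle<Name>::cast(
    bytecode_iterator().GetConstantForIndexOperand(0, isolate()));

Second, BuildGraphFromBytecode now wraps the incoming handles into broker refs itself and asserts (via the DCHECKs above) that the serializer has already processed the bytecode array and the shared-info/feedback-vector pair, which is what lets hunks such as VisitGetTemplateObject and ExitThenEnterExceptionHandlers guard against heap access or allocation during graph building.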
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index b9504a6086..682569778f 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -11,6 +11,9 @@
#include "src/handles/handles.h"
namespace v8 {
+
+class TickCounter;
+
namespace internal {
class BytecodeArray;
@@ -25,6 +28,9 @@ class SourcePositionTable;
enum class BytecodeGraphBuilderFlag : uint8_t {
kSkipFirstStackCheck = 1 << 0,
+ // TODO(neis): Remove liveness flag here when concurrent inlining is always
+ // on, because then the serializer will be the only place where we perform
+ // bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
};
@@ -39,8 +45,9 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- Handle<Context> native_context, int inlining_id,
- BytecodeGraphBuilderFlags flags);
+ Handle<NativeContext> native_context,
+ int inlining_id, BytecodeGraphBuilderFlags flags,
+ TickCounter* tick_counter);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index d8a01d6308..af0ba98ffd 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -226,8 +226,12 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(Node* node,
IntPtrConstant(kHeapObjectTagMask)),
IntPtrConstant(kWeakHeapObjectTag)),
&ok);
- Node* message_node = StringConstant(location);
- DebugAbort(message_node);
+ EmbeddedVector<char, 1024> message;
+ SNPrintF(message, "no Object: %s", location);
+ Node* message_node = StringConstant(message.begin());
+ // This somewhat misuses the AbortCSAAssert runtime function. This will print
+ // "abort: CSA_ASSERT failed: <message>", which is good enough.
+ AbortCSAAssert(message_node);
Unreachable();
Bind(&ok);
}
@@ -409,8 +413,8 @@ void CodeAssembler::ReturnRaw(Node* value) {
return raw_assembler()->Return(value);
}
-void CodeAssembler::DebugAbort(Node* message) {
- raw_assembler()->DebugAbort(message);
+void CodeAssembler::AbortCSAAssert(Node* message) {
+ raw_assembler()->AbortCSAAssert(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
@@ -441,16 +445,16 @@ void CodeAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
}
#endif // DEBUG
-Node* CodeAssembler::LoadFramePointer() {
- return raw_assembler()->LoadFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadFramePointer());
}
-Node* CodeAssembler::LoadParentFramePointer() {
- return raw_assembler()->LoadParentFramePointer();
+TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-Node* CodeAssembler::LoadStackPointer() {
- return raw_assembler()->LoadStackPointer();
+TNode<RawPtrT> CodeAssembler::LoadStackPointer() {
+ return UncheckedCast<RawPtrT>(raw_assembler()->LoadStackPointer());
}
TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(
@@ -1140,14 +1144,6 @@ Node* CodeAssembler::Retain(Node* value) {
return raw_assembler()->Retain(value);
}
-Node* CodeAssembler::ChangeTaggedToCompressed(Node* tagged) {
- return raw_assembler()->ChangeTaggedToCompressed(tagged);
-}
-
-Node* CodeAssembler::ChangeCompressedToTagged(Node* compressed) {
- return raw_assembler()->ChangeCompressedToTagged(compressed);
-}
-
Node* CodeAssembler::Projection(int index, Node* value) {
DCHECK_LT(index, value->op()->ValueOutputCount());
return raw_assembler()->Projection(index, value);
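Beyond the DebugAbort to AbortCSAAssert rename, the frame and stack pointer accessors now return typed nodes. A small sketch of the effect on downstream CSA code (illustrative only; the surrounding assembler context is assumed):

// Sketch only: the accessors are statically typed, so call sites no longer
// need UncheckedCast<RawPtrT> before doing pointer-typed work with the result.
TNode<RawPtrT> frame_pointer = LoadFramePointer();
TNode<RawPtrT> parent_frame_pointer = LoadParentFramePointer();

The GenerateCheckMaybeObjectIsObject hunk also spells out the intended failure output: with the SNPrintF'd message, a failing check prints "abort: CSA_ASSERT failed: no Object: <location>", which is why reusing AbortCSAAssert is described as good enough.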
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0f7ae64082..cc432214aa 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -73,6 +73,9 @@ class PromiseReactionJobTask;
class PromiseRejectReactionJobTask;
class WasmDebugInfo;
class Zone;
+#define MAKE_FORWARD_DECLARATION(V, NAME, Name, name) class Name;
+TORQUE_STRUCT_LIST_GENERATOR(MAKE_FORWARD_DECLARATION, UNUSED)
+#undef MAKE_FORWARD_DECLARATION
template <typename T>
class Signature;
@@ -107,13 +110,13 @@ struct Uint32T : Word32T {
struct Int16T : Int32T {
static constexpr MachineType kMachineType = MachineType::Int16();
};
-struct Uint16T : Uint32T {
+struct Uint16T : Uint32T, Int32T {
static constexpr MachineType kMachineType = MachineType::Uint16();
};
struct Int8T : Int16T {
static constexpr MachineType kMachineType = MachineType::Int8();
};
-struct Uint8T : Uint16T {
+struct Uint8T : Uint16T, Int16T {
static constexpr MachineType kMachineType = MachineType::Uint8();
};
@@ -147,6 +150,12 @@ struct Float64T : UntaggedT {
static constexpr MachineType kMachineType = MachineType::Float64();
};
+#ifdef V8_COMPRESS_POINTERS
+using TaggedT = Int32T;
+#else
+using TaggedT = IntPtrT;
+#endif
+
// Result of a comparison operation.
struct BoolT : Word32T {};
@@ -329,6 +338,7 @@ class WasmExceptionObject;
class WasmExceptionTag;
class WasmExportedFunctionData;
class WasmGlobalObject;
+class WasmIndirectFunctionTable;
class WasmJSFunctionData;
class WasmMemoryObject;
class WasmModuleObject;
@@ -413,6 +423,10 @@ struct types_have_common_values {
static const bool value = is_subtype<T, U>::value || is_subtype<U, T>::value;
};
template <class U>
+struct types_have_common_values<BoolT, U> {
+ static const bool value = types_have_common_values<Word32T, U>::value;
+};
+template <class U>
struct types_have_common_values<Uint32T, U> {
static const bool value = types_have_common_values<Word32T, U>::value;
};
@@ -611,14 +625,15 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(Float64Sqrt, Float64T, Float64T) \
V(Float64Tan, Float64T, Float64T) \
V(Float64Tanh, Float64T, Float64T) \
- V(Float64ExtractLowWord32, Word32T, Float64T) \
- V(Float64ExtractHighWord32, Word32T, Float64T) \
+ V(Float64ExtractLowWord32, Uint32T, Float64T) \
+ V(Float64ExtractHighWord32, Uint32T, Float64T) \
V(BitcastTaggedToWord, IntPtrT, Object) \
+ V(BitcastTaggedSignedToWord, IntPtrT, Smi) \
V(BitcastMaybeObjectToWord, IntPtrT, MaybeObject) \
V(BitcastWordToTagged, Object, WordT) \
V(BitcastWordToTaggedSigned, Smi, WordT) \
V(TruncateFloat64ToFloat32, Float32T, Float64T) \
- V(TruncateFloat64ToWord32, Word32T, Float64T) \
+ V(TruncateFloat64ToWord32, Uint32T, Float64T) \
V(TruncateInt64ToInt32, Int32T, Int64T) \
V(ChangeFloat32ToFloat64, Float64T, Float32T) \
V(ChangeFloat64ToUint32, Uint32T, Float64T) \
@@ -628,7 +643,7 @@ TNode<Float64T> Float64Add(TNode<Float64T> a, TNode<Float64T> b);
V(ChangeUint32ToFloat64, Float64T, Word32T) \
V(ChangeUint32ToUint64, Uint64T, Word32T) \
V(BitcastInt32ToFloat32, Float32T, Word32T) \
- V(BitcastFloat32ToInt32, Word32T, Float32T) \
+ V(BitcastFloat32ToInt32, Uint32T, Float32T) \
V(RoundFloat64ToInt32, Int32T, Float64T) \
V(RoundInt32ToFloat32, Int32T, Float32T) \
V(Float64SilenceNaN, Float64T, Float64T) \
@@ -840,10 +855,13 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// TODO(jkummerow): The style guide wants pointers for output parameters.
// https://google.github.io/styleguide/cppguide.html#Output_Parameters
- bool ToInt32Constant(Node* node, int32_t& out_value);
- bool ToInt64Constant(Node* node, int64_t& out_value);
+ bool ToInt32Constant(Node* node,
+ int32_t& out_value); // NOLINT(runtime/references)
+ bool ToInt64Constant(Node* node,
+ int64_t& out_value); // NOLINT(runtime/references)
bool ToSmiConstant(Node* node, Smi* out_value);
- bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+ bool ToIntPtrConstant(Node* node,
+ intptr_t& out_value); // NOLINT(runtime/references)
bool IsUndefinedConstant(TNode<Object> node);
bool IsNullConstant(TNode<Object> node);
@@ -872,7 +890,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnRaw(Node* value);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
@@ -938,11 +956,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Label** case_labels, size_t case_count);
// Access to the frame pointer
- Node* LoadFramePointer();
- Node* LoadParentFramePointer();
+ TNode<RawPtrT> LoadFramePointer();
+ TNode<RawPtrT> LoadParentFramePointer();
// Access to the stack pointer
- Node* LoadStackPointer();
+ TNode<RawPtrT> LoadStackPointer();
// Poison |value| on speculative paths.
TNode<Object> TaggedPoisonOnSpeculation(SloppyTNode<Object> value);
@@ -1047,20 +1065,60 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
- TNode<IntPtrT> WordShr(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
+ TNode<UintPtrT> WordShr(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
WordShr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordSar(TNode<IntPtrT> left, TNode<IntegralT> right) {
- return UncheckedCast<IntPtrT>(
- WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ return Signed(WordSar(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<IntPtrT> WordShl(TNode<IntPtrT> left, TNode<IntegralT> right) {
+ return Signed(WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordShl(TNode<UintPtrT> left, TNode<IntegralT> right) {
+ return Unsigned(
+ WordShl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Shl(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shl(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shl(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Shr(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Shr(static_cast<Node*>(left), static_cast<Node*>(right)));
}
TNode<IntPtrT> WordAnd(TNode<IntPtrT> left, TNode<IntPtrT> right) {
- return UncheckedCast<IntPtrT>(
+ return Signed(WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<UintPtrT> WordAnd(TNode<UintPtrT> left, TNode<UintPtrT> right) {
+ return Unsigned(
WordAnd(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Word32And(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32And(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Word32Or(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<Uint32T> Word32Or(TNode<Uint32T> left, TNode<Uint32T> right) {
+ return Unsigned(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
template <class Left, class Right,
class = typename std::enable_if<
std::is_base_of<Object, Left>::value &&
@@ -1106,6 +1164,15 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Word64NotEqual(SloppyTNode<Word64T> left,
SloppyTNode<Word64T> right);
+ TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32Or(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+ TNode<BoolT> Word32And(TNode<BoolT> left, TNode<BoolT> right) {
+ return UncheckedCast<BoolT>(
+ Word32And(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<Int32T> Int32Add(TNode<Int32T> left, TNode<Int32T> right) {
return Signed(
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
@@ -1116,6 +1183,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Int32Add(static_cast<Node*>(left), static_cast<Node*>(right)));
}
+ TNode<Int32T> Int32Sub(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Sub(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
+ TNode<Int32T> Int32Mul(TNode<Int32T> left, TNode<Int32T> right) {
+ return Signed(
+ Int32Mul(static_cast<Node*>(left), static_cast<Node*>(right)));
+ }
+
TNode<WordT> IntPtrAdd(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
TNode<IntPtrT> IntPtrDiv(TNode<IntPtrT> left, TNode<IntPtrT> right);
TNode<WordT> IntPtrSub(SloppyTNode<WordT> left, SloppyTNode<WordT> right);
@@ -1195,6 +1272,12 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_UNARY_OP_LIST(DECLARE_CODE_ASSEMBLER_UNARY_OP)
#undef DECLARE_CODE_ASSEMBLER_UNARY_OP
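+ // Note: the static_assert in the overload below is dependent on the template
+ // parameter, so it only fails when the overload is actually instantiated,
+ // i.e. when a caller passes a TNode<Smi>; the error message then points the
+ // caller at BitcastTaggedSignedToWord.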
+ template <class Dummy = void>
+ TNode<IntPtrT> BitcastTaggedToWord(TNode<Smi> node) {
+ static_assert(sizeof(Dummy) < 0,
+ "Should use BitcastTaggedSignedToWord instead.");
+ }
+
// Changes a double to an intptr_t for pointer arithmetic outside of Smi range.
// Assumes that the double can be exactly represented as an int.
TNode<UintPtrT> ChangeFloat64ToUintPtr(SloppyTNode<Float64T> value);
@@ -1217,10 +1300,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Projections
Node* Projection(int index, Node* value);
- // Pointer compression and decompression.
- Node* ChangeTaggedToCompressed(Node* tagged);
- Node* ChangeCompressedToTagged(Node* compressed);
-
template <int index, class T1, class T2>
TNode<typename std::tuple_element<index, std::tuple<T1, T2>>::type>
Projection(TNode<PairT<T1, T2>> value) {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index fa727748f6..5dd765527f 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -337,9 +337,9 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// End
// Now the effect input to the {Return} node can be either an {EffectPhi}
- // hanging off the same {Merge}, or the {Merge} node is only connected to
- // the {Return} and the {Phi}, in which case we know that the effect input
- // must somehow dominate all merged branches.
+ // hanging off the same {Merge}, or the effect chain doesn't depend on the
+ // {Phi} or the {Merge}, in which case we know that the effect input must
+ // somehow dominate all merged branches.
Node::Inputs control_inputs = control->inputs();
Node::Inputs value_inputs = value->inputs();
@@ -347,7 +347,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- if (control->OwnedBy(node, value)) {
+ if (control->OwnedBy(node, value) && value->OwnedBy(node)) {
for (int i = 0; i < control_inputs.count(); ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 45e558f609..0ef6402264 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -1216,8 +1216,18 @@ const Operator* CommonOperatorBuilder::HeapConstant(
value); // parameter
}
+const Operator* CommonOperatorBuilder::CompressedHeapConstant(
+ const Handle<HeapObject>& value) {
+ return new (zone()) Operator1<Handle<HeapObject>>( // --
+ IrOpcode::kCompressedHeapConstant, Operator::kPure, // opcode
+ "CompressedHeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
+}
+
Handle<HeapObject> HeapConstantOf(const Operator* op) {
- DCHECK_EQ(IrOpcode::kHeapConstant, op->opcode());
+ DCHECK(IrOpcode::kHeapConstant == op->opcode() ||
+ IrOpcode::kCompressedHeapConstant == op->opcode());
return OpParameter<Handle<HeapObject>>(op);
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 43a689b5c2..9f634e72ec 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -499,6 +499,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* NumberConstant(volatile double);
const Operator* PointerConstant(intptr_t);
const Operator* HeapConstant(const Handle<HeapObject>&);
+ const Operator* CompressedHeapConstant(const Handle<HeapObject>&);
const Operator* ObjectId(uint32_t);
const Operator* RelocatableInt32Constant(int32_t value,
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index f0bb797b68..673f4a341b 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -4,6 +4,7 @@
#include "src/compiler/compilation-dependencies.h"
+#include "src/compiler/compilation-dependency.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/objects-inl.h"
@@ -17,18 +18,7 @@ CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {}
-class CompilationDependencies::Dependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(const MaybeObjectHandle& code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
-#endif
-};
-
-class InitialMapDependency final : public CompilationDependencies::Dependency {
+class InitialMapDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the initial map.
@@ -56,8 +46,7 @@ class InitialMapDependency final : public CompilationDependencies::Dependency {
MapRef initial_map_;
};
-class PrototypePropertyDependency final
- : public CompilationDependencies::Dependency {
+class PrototypePropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the prototype.
@@ -96,7 +85,7 @@ class PrototypePropertyDependency final
ObjectRef prototype_;
};
-class StableMapDependency final : public CompilationDependencies::Dependency {
+class StableMapDependency final : public CompilationDependency {
public:
explicit StableMapDependency(const MapRef& map) : map_(map) {
DCHECK(map_.is_stable());
@@ -114,7 +103,7 @@ class StableMapDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class TransitionDependency final : public CompilationDependencies::Dependency {
+class TransitionDependency final : public CompilationDependency {
public:
explicit TransitionDependency(const MapRef& map) : map_(map) {
DCHECK(!map_.is_deprecated());
@@ -132,8 +121,7 @@ class TransitionDependency final : public CompilationDependencies::Dependency {
MapRef map_;
};
-class PretenureModeDependency final
- : public CompilationDependencies::Dependency {
+class PretenureModeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the mode.
@@ -163,8 +151,7 @@ class PretenureModeDependency final
AllocationType allocation_;
};
-class FieldRepresentationDependency final
- : public CompilationDependencies::Dependency {
+class FieldRepresentationDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the representation.
@@ -197,7 +184,7 @@ class FieldRepresentationDependency final
Representation representation_;
};
-class FieldTypeDependency final : public CompilationDependencies::Dependency {
+class FieldTypeDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type.
@@ -227,8 +214,7 @@ class FieldTypeDependency final : public CompilationDependencies::Dependency {
ObjectRef type_;
};
-class FieldConstnessDependency final
- : public CompilationDependencies::Dependency {
+class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& owner, int descriptor)
: owner_(owner), descriptor_(descriptor) {
@@ -255,8 +241,7 @@ class FieldConstnessDependency final
int descriptor_;
};
-class GlobalPropertyDependency final
- : public CompilationDependencies::Dependency {
+class GlobalPropertyDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the type and the read_only flag.
@@ -294,7 +279,7 @@ class GlobalPropertyDependency final
bool read_only_;
};
-class ProtectorDependency final : public CompilationDependencies::Dependency {
+class ProtectorDependency final : public CompilationDependency {
public:
explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {
DCHECK_EQ(cell_.value().AsSmi(), Isolate::kProtectorValid);
@@ -315,8 +300,7 @@ class ProtectorDependency final : public CompilationDependencies::Dependency {
PropertyCellRef cell_;
};
-class ElementsKindDependency final
- : public CompilationDependencies::Dependency {
+class ElementsKindDependency final : public CompilationDependency {
public:
// TODO(neis): Once the concurrent compiler frontend is always-on, we no
// longer need to explicitly store the elements kind.
@@ -349,7 +333,7 @@ class ElementsKindDependency final
};
class InitialMapInstanceSizePredictionDependency final
- : public CompilationDependencies::Dependency {
+ : public CompilationDependency {
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
@@ -380,7 +364,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
-void CompilationDependencies::RecordDependency(Dependency const* dependency) {
+void CompilationDependencies::RecordDependency(
+ CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
}
@@ -565,6 +550,11 @@ namespace {
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
+ // TODO(neis): Remove heap access (SerializePrototype call).
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
while (true) {
map.SerializePrototype();
HeapObjectRef proto = map.prototype();
@@ -635,7 +625,7 @@ CompilationDependencies::DependOnInitialMapInstanceSizePrediction(
return SlackTrackingPrediction(initial_map, instance_size);
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::TransitionDependencyOffTheRecord(
const MapRef& target_map) const {
if (target_map.CanBeDeprecated()) {
@@ -646,7 +636,7 @@ CompilationDependencies::TransitionDependencyOffTheRecord(
}
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
const MapRef& map, int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
@@ -657,7 +647,7 @@ CompilationDependencies::FieldRepresentationDependencyOffTheRecord(
details.representation());
}
-CompilationDependencies::Dependency const*
+CompilationDependency const*
CompilationDependencies::FieldTypeDependencyOffTheRecord(const MapRef& map,
int descriptor) const {
MapRef owner = map.FindFieldOwner(descriptor);
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index 37a2bc3a28..cb6cea0685 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -25,6 +25,8 @@ class SlackTrackingPrediction {
int inobject_property_count_;
};
+class CompilationDependency;
+
// Collects and installs dependencies of the code that is being generated.
class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
public:
@@ -113,14 +115,13 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
// DependOnTransition(map);
// is equivalent to:
// RecordDependency(TransitionDependencyOffTheRecord(map));
- class Dependency;
- void RecordDependency(Dependency const* dependency);
- Dependency const* TransitionDependencyOffTheRecord(
+ void RecordDependency(CompilationDependency const* dependency);
+ CompilationDependency const* TransitionDependencyOffTheRecord(
const MapRef& target_map) const;
- Dependency const* FieldRepresentationDependencyOffTheRecord(
+ CompilationDependency const* FieldRepresentationDependencyOffTheRecord(
+ const MapRef& map, int descriptor) const;
+ CompilationDependency const* FieldTypeDependencyOffTheRecord(
const MapRef& map, int descriptor) const;
- Dependency const* FieldTypeDependencyOffTheRecord(const MapRef& map,
- int descriptor) const;
// Exposed only for testing purposes.
bool AreValid() const;
@@ -128,7 +129,7 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
private:
Zone* const zone_;
JSHeapBroker* const broker_;
- ZoneForwardList<Dependency const*> dependencies_;
+ ZoneForwardList<CompilationDependency const*> dependencies_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
new file mode 100644
index 0000000000..e5726a0ddb
--- /dev/null
+++ b/deps/v8/src/compiler/compilation-dependency.h
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
+#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
+
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class MaybeObjectHandle;
+
+namespace compiler {
+
+class CompilationDependency : public ZoneObject {
+ public:
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(const MaybeObjectHandle& code) const = 0;
+
+#ifdef DEBUG
+ virtual bool IsPretenureModeDependency() const { return false; }
+#endif
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 7177a6069d..600db1d160 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/control-flow-optimizer.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-matchers.h"
@@ -16,18 +17,20 @@ namespace compiler {
ControlFlowOptimizer::ControlFlowOptimizer(Graph* graph,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine,
+ TickCounter* tick_counter,
Zone* zone)
: graph_(graph),
common_(common),
machine_(machine),
queue_(zone),
queued_(graph, 2),
- zone_(zone) {}
-
+ zone_(zone),
+ tick_counter_(tick_counter) {}
void ControlFlowOptimizer::Optimize() {
Enqueue(graph()->start());
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
if (node->IsDead()) continue;
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index 0a688a7c39..07fc9e6fc2 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -22,7 +25,8 @@ class Node;
class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
public:
ControlFlowOptimizer(Graph* graph, CommonOperatorBuilder* common,
- MachineOperatorBuilder* machine, Zone* zone);
+ MachineOperatorBuilder* machine,
+ TickCounter* tick_counter, Zone* zone);
void Optimize();
@@ -45,6 +49,7 @@ class V8_EXPORT_PRIVATE ControlFlowOptimizer final {
ZoneQueue<Node*> queue_;
NodeMarker<bool> queued_;
Zone* const zone_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(ControlFlowOptimizer);
};
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
new file mode 100644
index 0000000000..620d98019f
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -0,0 +1,336 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/csa-load-elimination.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CsaLoadElimination::Reduce(Node* node) {
+ if (FLAG_trace_turbo_load_elimination) {
+ if (node->op()->EffectInputCount() > 0) {
+ PrintF(" visit #%d:%s", node->id(), node->op()->mnemonic());
+ if (node->op()->ValueInputCount() > 0) {
+ PrintF("(");
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ if (i > 0) PrintF(", ");
+ Node* const value = NodeProperties::GetValueInput(node, i);
+ PrintF("#%d:%s", value->id(), value->op()->mnemonic());
+ }
+ PrintF(")");
+ }
+ PrintF("\n");
+ for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (AbstractState const* const state = node_states_.Get(effect)) {
+ PrintF(" state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ state->Print();
+ } else {
+ PrintF(" no state[%i]: #%d:%s\n", i, effect->id(),
+ effect->op()->mnemonic());
+ }
+ }
+ }
+ }
+ switch (node->opcode()) {
+ case IrOpcode::kLoadFromObject:
+ return ReduceLoadFromObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kStoreToObject:
+ return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
+ case IrOpcode::kDebugBreak:
+ case IrOpcode::kAbortCSAAssert:
+ // Avoid changing optimizations in the presence of debug instructions.
+ return PropagateInputState(node);
+ case IrOpcode::kCall:
+ return ReduceCall(node);
+ case IrOpcode::kEffectPhi:
+ return ReduceEffectPhi(node);
+ case IrOpcode::kDead:
+ break;
+ case IrOpcode::kStart:
+ return ReduceStart(node);
+ default:
+ return ReduceOtherNode(node);
+ }
+ return NoChange();
+}
+
+namespace CsaLoadEliminationHelpers {
+
+bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+ if (r1 == r2) return true;
+ return IsAnyCompressedTagged(r1) && IsAnyCompressedTagged(r2);
+}
+
+bool ObjectMayAlias(Node* a, Node* b) {
+ if (a != b) {
+ if (b->opcode() == IrOpcode::kAllocate) {
+ std::swap(a, b);
+ }
+ if (a->opcode() == IrOpcode::kAllocate) {
+ switch (b->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kParameter:
+ return false;
+ default:
+ break;
+ }
+ }
+ }
+ return true;
+}
+
+bool OffsetMayAlias(Node* offset1, MachineRepresentation repr1, Node* offset2,
+ MachineRepresentation repr2) {
+ IntPtrMatcher matcher1(offset1);
+ IntPtrMatcher matcher2(offset2);
+ // If either of the offsets is variable, accesses may alias
+ if (!matcher1.HasValue() || !matcher2.HasValue()) {
+ return true;
+ }
+ // Otherwise, we return whether accesses overlap
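+ // (e.g. an 8-byte access at offset 0 covers [0, 8) and overlaps a 4-byte
+ // access at offset 4, but not one at offset 8).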
+ intptr_t start1 = matcher1.Value();
+ intptr_t end1 = start1 + ElementSizeInBytes(repr1);
+ intptr_t start2 = matcher2.Value();
+ intptr_t end2 = start2 + ElementSizeInBytes(repr2);
+ return !(end1 <= start2 || end2 <= start1);
+}
+
+} // namespace CsaLoadEliminationHelpers
+
+namespace Helpers = CsaLoadEliminationHelpers;
+
+void CsaLoadElimination::AbstractState::Merge(AbstractState const* that,
+ Zone* zone) {
+ FieldInfo empty_info;
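+ // Keep an entry only if both states agree on it; everything else is reset
+ // to the empty FieldInfo, so the merge behaves like an intersection.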
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ if (that->field_infos_.Get(entry.first) != entry.second) {
+ field_infos_.Set(entry.first, empty_info);
+ }
+ }
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::KillField(Node* kill_object,
+ Node* kill_offset,
+ MachineRepresentation kill_repr,
+ Zone* zone) const {
+ FieldInfo empty_info;
+ AbstractState* that = new (zone) AbstractState(*this);
+ for (std::pair<Field, FieldInfo> entry : that->field_infos_) {
+ Field field = entry.first;
+ MachineRepresentation field_repr = entry.second.representation;
+ if (Helpers::OffsetMayAlias(kill_offset, kill_repr, field.second,
+ field_repr) &&
+ Helpers::ObjectMayAlias(kill_object, field.first)) {
+ that->field_infos_.Set(field, empty_info);
+ }
+ }
+ return that;
+}
+
+CsaLoadElimination::AbstractState const*
+CsaLoadElimination::AbstractState::AddField(Node* object, Node* offset,
+ CsaLoadElimination::FieldInfo info,
+ Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->field_infos_.Set({object, offset}, info);
+ return that;
+}
+
+CsaLoadElimination::FieldInfo CsaLoadElimination::AbstractState::Lookup(
+ Node* object, Node* offset) const {
+ if (object->IsDead()) {
+ return {};
+ }
+ return field_infos_.Get({object, offset});
+}
+
+void CsaLoadElimination::AbstractState::Print() const {
+ for (std::pair<Field, FieldInfo> entry : field_infos_) {
+ Field field = entry.first;
+ Node* object = field.first;
+ Node* offset = field.second;
+ FieldInfo info = entry.second;
+ PrintF(" #%d+#%d:%s -> #%d:%s [repr=%s]\n", object->id(), offset->id(),
+ object->op()->mnemonic(), info.value->id(),
+ info.value->op()->mnemonic(),
+ MachineReprToString(info.representation));
+ }
+}
+
+Reduction CsaLoadElimination::ReduceLoadFromObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ MachineRepresentation representation = access.machine_type.representation();
+ FieldInfo lookup_result = state->Lookup(object, offset);
+ if (!lookup_result.IsEmpty()) {
+ // Make sure we don't reuse values that were recorded with a different
+ // representation or resurrect dead {replacement} nodes.
+ Node* replacement = lookup_result.value;
+ if (Helpers::IsCompatible(representation, lookup_result.representation) &&
+ !replacement->IsDead()) {
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
+ }
+ }
+ FieldInfo info(node, representation);
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStoreToObject(Node* node,
+ ObjectAccess const& access) {
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* offset = NodeProperties::GetValueInput(node, 1);
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+
+ FieldInfo info(value, access.machine_type.representation());
+ state = state->KillField(object, offset, info.representation, zone());
+ state = state->AddField(object, offset, info, zone());
+
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceEffectPhi(Node* node) {
+ Node* const effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ AbstractState const* state0 = node_states_.Get(effect0);
+ if (state0 == nullptr) return NoChange();
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Here we rely on having only reducible loops:
+ // The loop entry edge always dominates the header, so we can just take
+ // the state from the first input, and compute the loop state based on it.
+ AbstractState const* state = ComputeLoopState(node, state0);
+ return UpdateState(node, state);
+ }
+ DCHECK_EQ(IrOpcode::kMerge, control->opcode());
+
+ // Shortcut for the case when we do not know anything about some input.
+ int const input_count = node->op()->EffectInputCount();
+ for (int i = 1; i < input_count; ++i) {
+ Node* const effect = NodeProperties::GetEffectInput(node, i);
+ if (node_states_.Get(effect) == nullptr) return NoChange();
+ }
+
+ // Make a copy of the first input's state and merge with the state
+ // from other inputs.
+ AbstractState* state = new (zone()) AbstractState(*state0);
+ for (int i = 1; i < input_count; ++i) {
+ Node* const input = NodeProperties::GetEffectInput(node, i);
+ state->Merge(node_states_.Get(input), zone());
+ }
+ return UpdateState(node, state);
+}
+
+Reduction CsaLoadElimination::ReduceStart(Node* node) {
+ return UpdateState(node, empty_state());
+}
+
+Reduction CsaLoadElimination::ReduceCall(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ ExternalReferenceMatcher m(value);
+ if (m.Is(ExternalReference::check_object_type())) {
+ return PropagateInputState(node);
+ }
+ return ReduceOtherNode(node);
+}
+
+Reduction CsaLoadElimination::ReduceOtherNode(Node* node) {
+ if (node->op()->EffectInputCount() == 1) {
+ if (node->op()->EffectOutputCount() == 1) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ // If we do not know anything about the predecessor, do not propagate
+ // just yet because we will have to recompute anyway once we compute
+ // the predecessor.
+ if (state == nullptr) return NoChange();
+ // Check if this {node} has some uncontrolled side effects.
+ if (!node->op()->HasProperty(Operator::kNoWrite)) {
+ state = empty_state();
+ }
+ return UpdateState(node, state);
+ } else {
+ return NoChange();
+ }
+ }
+ DCHECK_EQ(0, node->op()->EffectInputCount());
+ DCHECK_EQ(0, node->op()->EffectOutputCount());
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::UpdateState(Node* node,
+ AbstractState const* state) {
+ AbstractState const* original = node_states_.Get(node);
+ // Only signal that the {node} has Changed if the information about {state}
+ // has changed with respect to the {original}.
+ if (state != original) {
+ if (original == nullptr || !state->Equals(original)) {
+ node_states_.Set(node, state);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+Reduction CsaLoadElimination::PropagateInputState(Node* node) {
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ AbstractState const* state = node_states_.Get(effect);
+ if (state == nullptr) return NoChange();
+ return UpdateState(node, state);
+}
+
+CsaLoadElimination::AbstractState const* CsaLoadElimination::ComputeLoopState(
+ Node* node, AbstractState const* state) const {
+ DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
+ Node* const control = NodeProperties::GetControlInput(node);
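+ // Walk backwards through the effect chain from the loop's back edges; if
+ // any node inside the loop may write, conservatively start from the empty
+ // state.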
+ ZoneQueue<Node*> queue(zone());
+ ZoneSet<Node*> visited(zone());
+ visited.insert(node);
+ for (int i = 1; i < control->InputCount(); ++i) {
+ queue.push(node->InputAt(i));
+ }
+ while (!queue.empty()) {
+ Node* const current = queue.front();
+ queue.pop();
+ if (visited.insert(current).second) {
+ if (!current->op()->HasProperty(Operator::kNoWrite)) {
+ return empty_state();
+ }
+ for (int i = 0; i < current->op()->EffectInputCount(); ++i) {
+ queue.push(NodeProperties::GetEffectInput(current, i));
+ }
+ }
+ }
+ return state;
+}
+
+CommonOperatorBuilder* CsaLoadElimination::common() const {
+ return jsgraph()->common();
+}
+
+Graph* CsaLoadElimination::graph() const { return jsgraph()->graph(); }
+
+Isolate* CsaLoadElimination::isolate() const { return jsgraph()->isolate(); }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/csa-load-elimination.h b/deps/v8/src/compiler/csa-load-elimination.h
new file mode 100644
index 0000000000..9460858d04
--- /dev/null
+++ b/deps/v8/src/compiler/csa-load-elimination.h
@@ -0,0 +1,118 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+#define V8_COMPILER_CSA_LOAD_ELIMINATION_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/persistent-map.h"
+#include "src/handles/maybe-handles.h"
+#include "src/zone/zone-handle-set.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+struct ObjectAccess;
+class Graph;
+class JSGraph;
+
+class V8_EXPORT_PRIVATE CsaLoadElimination final
+ : public NON_EXPORTED_BASE(AdvancedReducer) {
+ public:
+ CsaLoadElimination(Editor* editor, JSGraph* jsgraph, Zone* zone)
+ : AdvancedReducer(editor),
+ empty_state_(zone),
+ node_states_(jsgraph->graph()->NodeCount(), zone),
+ jsgraph_(jsgraph),
+ zone_(zone) {}
+ ~CsaLoadElimination() final = default;
+
+ const char* reducer_name() const override { return "CsaLoadElimination"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ struct FieldInfo {
+ FieldInfo() = default;
+ FieldInfo(Node* value, MachineRepresentation representation)
+ : value(value), representation(representation) {}
+
+ bool operator==(const FieldInfo& other) const {
+ return value == other.value && representation == other.representation;
+ }
+
+ bool operator!=(const FieldInfo& other) const { return !(*this == other); }
+
+ bool IsEmpty() const { return value == nullptr; }
+
+ Node* value = nullptr;
+ MachineRepresentation representation = MachineRepresentation::kNone;
+ };
+
+ class AbstractState final : public ZoneObject {
+ public:
+ explicit AbstractState(Zone* zone) : field_infos_(zone) {}
+
+ bool Equals(AbstractState const* that) const {
+ return field_infos_ == that->field_infos_;
+ }
+ void Merge(AbstractState const* that, Zone* zone);
+
+ AbstractState const* KillField(Node* object, Node* offset,
+ MachineRepresentation repr,
+ Zone* zone) const;
+ AbstractState const* AddField(Node* object, Node* offset, FieldInfo info,
+ Zone* zone) const;
+ FieldInfo Lookup(Node* object, Node* offset) const;
+
+ void Print() const;
+
+ private:
+ using Field = std::pair<Node*, Node*>;
+ using FieldInfos = PersistentMap<Field, FieldInfo>;
+ FieldInfos field_infos_;
+ };
+
+ Reduction ReduceLoadFromObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceStoreToObject(Node* node, ObjectAccess const& access);
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReduceStart(Node* node);
+ Reduction ReduceCall(Node* node);
+ Reduction ReduceOtherNode(Node* node);
+
+ Reduction UpdateState(Node* node, AbstractState const* state);
+ Reduction PropagateInputState(Node* node);
+
+ AbstractState const* ComputeLoopState(Node* node,
+ AbstractState const* state) const;
+
+ CommonOperatorBuilder* common() const;
+ Isolate* isolate() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Zone* zone() const { return zone_; }
+ AbstractState const* empty_state() const { return &empty_state_; }
+
+ AbstractState const empty_state_;
+ NodeAuxData<AbstractState const*> node_states_;
+ JSGraph* const jsgraph_;
+ Zone* zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(CsaLoadElimination);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_CSA_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/decompression-elimination.cc b/deps/v8/src/compiler/decompression-elimination.cc
index e69e61fac5..537744652b 100644
--- a/deps/v8/src/compiler/decompression-elimination.cc
+++ b/deps/v8/src/compiler/decompression-elimination.cc
@@ -21,10 +21,8 @@ bool DecompressionElimination::IsReducibleConstantOpcode(
IrOpcode::Value opcode) {
switch (opcode) {
case IrOpcode::kInt64Constant:
- return true;
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
case IrOpcode::kHeapConstant:
+ return true;
default:
return false;
}
@@ -55,13 +53,8 @@ Node* DecompressionElimination::GetCompressedConstant(Node* constant) {
static_cast<int32_t>(OpParameter<int64_t>(constant->op()))));
break;
case IrOpcode::kHeapConstant:
- // TODO(v8:8977): The HeapConstant remains as 64 bits. This does not
- // affect the comparison and it will still work correctly. However, we are
- // introducing a 64 bit value in the stream where a 32 bit one will
- // suffice. Currently there is no "CompressedHeapConstant", and
- // introducing a new opcode and handling it correctly throught the
- // pipeline seems that it will involve quite a bit of work.
- return constant;
+ return graph()->NewNode(
+ common()->CompressedHeapConstant(HeapConstantOf(constant->op())));
default:
UNREACHABLE();
}
@@ -84,6 +77,21 @@ Reduction DecompressionElimination::ReduceCompress(Node* node) {
}
}
+Reduction DecompressionElimination::ReduceDecompress(Node* node) {
+ DCHECK(IrOpcode::IsDecompressOpcode(node->opcode()));
+
+ DCHECK_EQ(node->InputCount(), 1);
+ Node* input_node = node->InputAt(0);
+ IrOpcode::Value input_opcode = input_node->opcode();
+ if (IrOpcode::IsCompressOpcode(input_opcode)) {
+ DCHECK(IsValidDecompress(input_opcode, node->opcode()));
+ DCHECK_EQ(input_node->InputCount(), 1);
+ return Replace(input_node->InputAt(0));
+ } else {
+ return NoChange();
+ }
+}
+
Reduction DecompressionElimination::ReducePhi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kPhi);
@@ -138,7 +146,10 @@ Reduction DecompressionElimination::ReducePhi(Node* node) {
// Add a decompress after the Phi. To do this, we need to replace the Phi with
// "Phi <- Decompress".
- return Replace(graph()->NewNode(op, node));
+ Node* decompress = graph()->NewNode(op, node);
+ ReplaceWithValue(node, decompress);
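+ // ReplaceWithValue also redirected the decompress node's own input to
+ // itself, so point it back at the Phi.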
+ decompress->ReplaceInput(0, node);
+ return Changed(node);
}
Reduction DecompressionElimination::ReduceTypedStateValues(Node* node) {
@@ -201,6 +212,10 @@ Reduction DecompressionElimination::Reduce(Node* node) {
case IrOpcode::kChangeTaggedSignedToCompressedSigned:
case IrOpcode::kChangeTaggedPointerToCompressedPointer:
return ReduceCompress(node);
+ case IrOpcode::kChangeCompressedToTagged:
+ case IrOpcode::kChangeCompressedSignedToTaggedSigned:
+ case IrOpcode::kChangeCompressedPointerToTaggedPointer:
+ return ReduceDecompress(node);
case IrOpcode::kPhi:
return ReducePhi(node);
case IrOpcode::kTypedStateValues:
diff --git a/deps/v8/src/compiler/decompression-elimination.h b/deps/v8/src/compiler/decompression-elimination.h
index c850f064a9..85a6c98aa0 100644
--- a/deps/v8/src/compiler/decompression-elimination.h
+++ b/deps/v8/src/compiler/decompression-elimination.h
@@ -38,7 +38,7 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// elimination.
bool IsReducibleConstantOpcode(IrOpcode::Value opcode);
- // Get the new 32 bit node constant given the 64 bit one
+ // Get the new 32 bit node constant given the 64 bit one.
Node* GetCompressedConstant(Node* constant);
// Removes direct Decompressions & Compressions, going from
@@ -48,6 +48,9 @@ class V8_EXPORT_PRIVATE DecompressionElimination final
// Can be used for Any, Signed, and Pointer compressions.
Reduction ReduceCompress(Node* node);
+ // Removes direct Compressions & Decompressions, analogously to ReduceCompress.
+ Reduction ReduceDecompress(Node* node);
+
// Replaces Phi's input decompressions with their input node, if and only if
// all of the Phi's inputs are Decompress nodes.
Reduction ReducePhi(Node* node);
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index cc6ca954f3..cac1b1726b 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -33,13 +33,13 @@ struct Diamond {
}
// Place {this} after {that} in control flow order.
- void Chain(Diamond& that) { branch->ReplaceInput(1, that.merge); }
+ void Chain(Diamond const& that) { branch->ReplaceInput(1, that.merge); }
// Place {this} after {that} in control flow order.
void Chain(Node* that) { branch->ReplaceInput(1, that); }
// Nest {this} into either the if_true or if_false branch of {that}.
- void Nest(Diamond& that, bool if_true) {
+ void Nest(Diamond const& that, bool if_true) {
if (if_true) {
branch->ReplaceInput(1, that.if_true);
that.merge->ReplaceInput(0, merge);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index ced078a178..788638fe68 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -17,6 +17,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
#include "src/objects/oddball.h"
@@ -51,6 +52,7 @@ class EffectControlLinearizer {
bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
Node** control);
Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToCompressedSigned(Node* node);
Node* LowerChangeInt31ToTaggedSigned(Node* node);
Node* LowerChangeInt32ToTagged(Node* node);
Node* LowerChangeInt64ToTagged(Node* node);
@@ -58,6 +60,7 @@ class EffectControlLinearizer {
Node* LowerChangeUint64ToTagged(Node* node);
Node* LowerChangeFloat64ToTagged(Node* node);
Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeCompressedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt32(Node* node);
Node* LowerChangeTaggedSignedToInt64(Node* node);
Node* LowerChangeTaggedToBit(Node* node);
@@ -75,6 +78,7 @@ class EffectControlLinearizer {
Node* LowerCheckReceiver(Node* node, Node* frame_state);
Node* LowerCheckReceiverOrNullOrUndefined(Node* node, Node* frame_state);
Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckBigInt(Node* node, Node* frame_state);
Node* LowerCheckSymbol(Node* node, Node* frame_state);
void LowerCheckIf(Node* node, Node* frame_state);
Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
@@ -84,6 +88,7 @@ class EffectControlLinearizer {
Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToCompressedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToInt32(Node* node, Node* frame_state);
Node* LowerCheckedInt64ToTaggedSigned(Node* node, Node* frame_state);
@@ -101,6 +106,9 @@ class EffectControlLinearizer {
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerBigIntAsUintN(Node* node, Node* frame_state);
+ Node* LowerChangeUint64ToBigInt(Node* node);
+ Node* LowerTruncateBigIntToUint64(Node* node);
Node* LowerCheckedCompressedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedCompressedToTaggedPointer(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToCompressedSigned(Node* node, Node* frame_state);
@@ -150,17 +158,20 @@ class EffectControlLinearizer {
Node* LowerStringConcat(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharCodeAt(Node* node);
- Node* LowerStringCodePointAt(Node* node, UnicodeEncoding encoding);
+ Node* LowerStringCodePointAt(Node* node);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromSingleCharCode(Node* node);
Node* LowerStringFromSingleCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
Node* LowerStringSubstring(Node* node);
+ Node* LowerStringFromCodePointAt(Node* node);
Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerBigIntAdd(Node* node, Node* frame_state);
+ Node* LowerBigIntNegate(Node* node);
Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
Node* LowerCheckNotTaggedHole(Node* node, Node* frame_state);
Node* LowerConvertTaggedHoleToUndefined(Node* node);
@@ -186,6 +197,7 @@ class EffectControlLinearizer {
void LowerTransitionAndStoreNumberElement(Node* node);
void LowerTransitionAndStoreNonNumberElement(Node* node);
void LowerRuntimeAbort(Node* node);
+ Node* LowerAssertType(Node* node);
Node* LowerConvertReceiver(Node* node);
Node* LowerDateNow(Node* node);
@@ -214,6 +226,7 @@ class EffectControlLinearizer {
Node* LowerStringComparison(Callable const& callable, Node* node);
Node* IsElementsKindGreaterThan(Node* kind, ElementsKind reference_kind);
+ Node* ChangeInt32ToCompressedSmi(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeInt32ToIntPtr(Node* value);
Node* ChangeInt64ToSmi(Node* value);
@@ -222,6 +235,7 @@ class EffectControlLinearizer {
Node* ChangeUint32ToUintPtr(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* ChangeSmiToIntPtr(Node* value);
+ Node* ChangeCompressedSmiToInt32(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeSmiToInt64(Node* value);
Node* ObjectIsSmi(Node* value);
@@ -827,6 +841,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeBitToTagged:
result = LowerChangeBitToTagged(node);
break;
+ case IrOpcode::kChangeInt31ToCompressedSigned:
+ result = LowerChangeInt31ToCompressedSigned(node);
+ break;
case IrOpcode::kChangeInt31ToTaggedSigned:
result = LowerChangeInt31ToTaggedSigned(node);
break;
@@ -848,6 +865,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kChangeFloat64ToTaggedPointer:
result = LowerChangeFloat64ToTaggedPointer(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
+ result = LowerChangeCompressedSignedToInt32(node);
+ break;
case IrOpcode::kChangeTaggedSignedToInt32:
result = LowerChangeTaggedSignedToInt32(node);
break;
@@ -911,6 +931,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckString:
result = LowerCheckString(node, frame_state);
break;
+ case IrOpcode::kCheckBigInt:
+ result = LowerCheckBigInt(node, frame_state);
+ break;
case IrOpcode::kCheckInternalizedString:
result = LowerCheckInternalizedString(node, frame_state);
break;
@@ -938,6 +961,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedInt32Mul:
result = LowerCheckedInt32Mul(node, frame_state);
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
+ result = LowerCheckedInt32ToCompressedSigned(node, frame_state);
+ break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
@@ -993,6 +1019,15 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToTaggedPointer:
result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
+ case IrOpcode::kBigIntAsUintN:
+ result = LowerBigIntAsUintN(node, frame_state);
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ result = LowerChangeUint64ToBigInt(node);
+ break;
+ case IrOpcode::kTruncateBigIntToUint64:
+ result = LowerTruncateBigIntToUint64(node);
+ break;
case IrOpcode::kCheckedCompressedToTaggedSigned:
result = LowerCheckedCompressedToTaggedSigned(node, frame_state);
break;
@@ -1110,6 +1145,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringFromCodePointAt:
+ result = LowerStringFromCodePointAt(node);
+ break;
case IrOpcode::kStringLength:
result = LowerStringLength(node);
break;
@@ -1120,7 +1158,7 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringCodePointAt:
- result = LowerStringCodePointAt(node, UnicodeEncodingOf(node->op()));
+ result = LowerStringCodePointAt(node);
break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
@@ -1140,6 +1178,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kBigIntAdd:
+ result = LowerBigIntAdd(node, frame_state);
+ break;
+ case IrOpcode::kBigIntNegate:
+ result = LowerBigIntNegate(node);
+ break;
case IrOpcode::kNumberIsFloat64Hole:
result = LowerNumberIsFloat64Hole(node);
break;
@@ -1233,6 +1277,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kRuntimeAbort:
LowerRuntimeAbort(node);
break;
+ case IrOpcode::kAssertType:
+ result = LowerAssertType(node);
+ break;
case IrOpcode::kConvertReceiver:
result = LowerConvertReceiver(node);
break;
@@ -1357,6 +1404,11 @@ Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerChangeInt31ToCompressedSigned(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeInt32ToCompressedSmi(value);
+}
+
Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
return ChangeInt32ToSmi(value);
@@ -1461,6 +1513,11 @@ Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
return ChangeSmiToInt32(value);
}
+Node* EffectControlLinearizer::LowerChangeCompressedSignedToInt32(Node* node) {
+ Node* value = node->InputAt(0);
+ return ChangeCompressedSmiToInt32(value);
+}
+
Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt64(Node* node) {
Node* value = node->InputAt(0);
return ChangeSmiToInt64(value);
@@ -1684,8 +1741,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToCompressedSigned(Node* node) {
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
vfalse = __ ChangeFloat64ToInt32(vfalse);
- vfalse = ChangeInt32ToSmi(vfalse);
- vfalse = __ ChangeTaggedSignedToCompressedSigned(vfalse);
+ vfalse = ChangeInt32ToCompressedSmi(vfalse);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -2283,6 +2339,19 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
return value;
}
+Node* EffectControlLinearizer::LowerCheckedInt32ToCompressedSigned(
+ Node* node, Node* frame_state) {
+ DCHECK(SmiValuesAre31Bits());
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
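+ // Adding {value} to itself is the same as shifting it left by one, i.e. it
+ // produces the Smi-tagged representation directly; the overflow bit is set
+ // exactly when {value} does not fit into 31 bits.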
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
+ return __ Projection(0, add);
+}
+
Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
@@ -2651,6 +2720,121 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
return value;
}
+Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
+
+ // Check for Smi.
+ Node* smi_check = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), smi_check,
+ frame_state);
+
+ // Check for BigInt.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* bi_check = __ WordEqual(value_map, __ BigIntMapConstant());
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ bi_check, frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node,
+ Node* frame_state) {
+ DCHECK(machine()->Is64());
+
+ const int bits = OpParameter<int>(node->op());
+ DCHECK(0 <= bits && bits <= 64);
+
+ if (bits == 64) {
+ // Reduce to nop.
+ return node->InputAt(0);
+ } else {
+ const uint64_t msk = (1ULL << bits) - 1ULL;
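+ // e.g. bits == 3 gives msk == 0b111, so only the low three bits are kept.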
+ return __ Word64And(node->InputAt(0), __ Int64Constant(msk));
+ }
+}
+
+Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
+ DCHECK(machine()->Is64());
+
+ Node* value = node->InputAt(0);
+ Node* map = jsgraph()->HeapConstant(factory()->bigint_map());
+ // BigInts with value 0 must be of size 0 (canonical form).
+ auto if_zerodigits = __ MakeLabel();
+ auto if_onedigit = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTagged);
+
+ __ GotoIf(__ Word64Equal(value, __ IntPtrConstant(0)), &if_zerodigits);
+ __ Goto(&if_onedigit);
+
+ __ Bind(&if_onedigit);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(1)));
+ const auto bitfield = BigInt::LengthBits::update(0, 1);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ StoreField(AccessBuilder::ForBigIntLeastSignificantDigit64(), result,
+ value);
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&if_zerodigits);
+ {
+ Node* result = __ Allocate(AllocationType::kYoung,
+ __ IntPtrConstant(BigInt::SizeFor(0)));
+ const auto bitfield = BigInt::LengthBits::update(0, 0);
+ __ StoreField(AccessBuilder::ForMap(), result, map);
+ __ StoreField(AccessBuilder::ForBigIntBitfield(), result,
+ __ IntPtrConstant(bitfield));
+ // BigInts have no padding on 64 bit architectures with pointer compression.
+ if (BigInt::HasOptionalPadding()) {
+ __ StoreField(AccessBuilder::ForBigIntOptionalPadding(), result,
+ __ IntPtrConstant(0));
+ }
+ __ Goto(&done, result);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerTruncateBigIntToUint64(Node* node) {
+ DCHECK(machine()->Is64());
+
+ auto done = __ MakeLabel(MachineRepresentation::kWord64);
+ auto if_neg = __ MakeLabel();
+ auto if_not_zero = __ MakeLabel();
+
+ Node* value = node->InputAt(0);
+
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ __ GotoIfNot(__ Word32Equal(bitfield, __ Int32Constant(0)), &if_not_zero);
+ __ Goto(&done, __ Int64Constant(0));
+
+ __ Bind(&if_not_zero);
+ {
+ Node* lsd =
+ __ LoadField(AccessBuilder::ForBigIntLeastSignificantDigit64(), value);
+ Node* sign =
+ __ Word32And(bitfield, __ Int32Constant(BigInt::SignBits::kMask));
+ __ GotoIf(__ Word32Equal(sign, __ Int32Constant(1)), &if_neg);
+ __ Goto(&done, lsd);
+
+ __ Bind(&if_neg);
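+ // A negative BigInt stores its magnitude, so the unsigned 64 bit result
+ // is the two's complement, i.e. 0 - lsd.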
+ __ Goto(&done, __ Int64Sub(__ Int64Constant(0), lsd));
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerCheckedCompressedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
@@ -3726,16 +3910,12 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
return loop_done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerStringCodePointAt(
- Node* node, UnicodeEncoding encoding) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
- Builtins::Name builtin = encoding == UnicodeEncoding::UTF16
- ? Builtins::kStringCodePointAtUTF16
- : Builtins::kStringCodePointAtUTF32;
-
- Callable const callable = Builtins::CallableFor(isolate(), builtin);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -3968,31 +4148,23 @@ Node* EffectControlLinearizer::LowerStringFromSingleCodePoint(Node* node) {
__ Bind(&if_not_single_code);
// Generate surrogate pair string
{
- switch (UnicodeEncodingOf(node->op())) {
- case UnicodeEncoding::UTF16:
- break;
+ // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
+ Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
- case UnicodeEncoding::UTF32: {
- // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
+ // lead = (codepoint >> 10) + LEAD_OFFSET
+ Node* lead =
+ __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
- // lead = (codepoint >> 10) + LEAD_OFFSET
- Node* lead =
- __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
+ // trail = (codepoint & 0x3FF) + 0xDC00;
+ Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+ __ Int32Constant(0xDC00));
- // trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
- __ Int32Constant(0xDC00));
-
- // codpoint = (trail << 16) | lead;
+ // codepoint = (trail << 16) | lead;
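+ // For example, code point U+1F600 yields lead 0xD83D
+ // ((0x1F600 >> 10) + 0xD7C0) and trail 0xDE00 ((0x1F600 & 0x3FF) + 0xDC00).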
#if V8_TARGET_BIG_ENDIAN
- code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
+ code = __ Word32Or(__ Word32Shl(lead, __ Int32Constant(16)), trail);
#else
- code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
+ code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
#endif
- break;
- }
- }
// Allocate a new SeqTwoByteString for {code}.
Node* vfalse0 =
@@ -4032,6 +4204,21 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
search_string, position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringFromCodePointAt(Node* node) {
+ Node* string = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringFromCodePointAt);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), string,
+ index, __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerStringLength(Node* node) {
Node* subject = node->InputAt(0);
@@ -4083,6 +4270,41 @@ Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
Builtins::CallableFor(isolate(), Builtins::kStringLessThanOrEqual), node);
}
+Node* EffectControlLinearizer::LowerBigIntAdd(Node* node, Node* frame_state) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntAddNoThrow);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()), lhs,
+ rhs, __ NoContextConstant());
+
+ // Check for exception sentinel: Smi is returned to signal BigIntTooBig.
+ __ DeoptimizeIf(DeoptimizeReason::kBigIntTooBig, VectorSlotPair{},
+ ObjectIsSmi(value), frame_state);
+
+ return value;
+}
+
+Node* EffectControlLinearizer::LowerBigIntNegate(Node* node) {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kBigIntUnaryMinus);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), CallDescriptor::kNoFlags,
+ Operator::kFoldable | Operator::kNoThrow);
+ Node* value =
+ __ Call(call_descriptor, jsgraph()->HeapConstant(callable.code()),
+ node->InputAt(0), __ NoContextConstant());
+
+ return value;
+}
+
Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
@@ -4256,6 +4478,11 @@ Node* EffectControlLinearizer::ChangeIntPtrToInt32(Node* value) {
return value;
}
+Node* EffectControlLinearizer::ChangeInt32ToCompressedSmi(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
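+ // With 31 bit Smis the shift amount is 1, so e.g. 5 becomes the compressed
+ // tagged value 10.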
+ return __ Word32Shl(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
// Do shift on 32bit values if Smis are stored in the lower word.
if (machine()->Is64() && SmiValuesAre31Bits()) {
@@ -4305,6 +4532,11 @@ Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
return ChangeSmiToIntPtr(value);
}
+Node* EffectControlLinearizer::ChangeCompressedSmiToInt32(Node* value) {
+ CHECK(machine()->Is64() && SmiValuesAre31Bits());
+ return __ Word32Sar(value, SmiShiftBitsConstant());
+}
+
Node* EffectControlLinearizer::ChangeSmiToInt64(Node* value) {
CHECK(machine()->Is64());
return ChangeSmiToIntPtr(value);
@@ -5163,6 +5395,30 @@ void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
__ Int32Constant(1), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerAssertType(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
+ Type type = OpParameter<Type>(node->op());
+ DCHECK(type.IsRange());
+ auto range = type.AsRange();
+
+ Node* const input = node->InputAt(0);
+ Node* const min = __ NumberConstant(range->Min());
+ Node* const max = __ NumberConstant(range->Max());
+
+ {
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kCheckNumberInRange);
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ callable.descriptor().GetStackParameterCount(), flags, properties);
+ __ Call(call_descriptor, __ HeapConstant(callable.code()), input, min, max,
+ __ NoContextConstant());
+ return input;
+ }
+}
+
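LowerAssertType keeps the asserted value but also emits a call to the CheckNumberInRange builtin with the bounds of the Range type. Semantically the emitted check behaves roughly like the sketch below; the abort() stand-in is an assumption, the real builtin reports the failed assertion through its own machinery.

#include <cstdlib>

// Hypothetical scalar equivalent of the emitted CheckNumberInRange call.
double AssertNumberInRangeSketch(double input, double min, double max) {
  if (!(min <= input && input <= max)) {
    std::abort();  // stand-in for the builtin's assertion-failure path
  }
  return input;  // the lowered node still produces the original input
}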
Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
ConvertReceiverMode const mode = ConvertReceiverModeOf(node->op());
Node* value = node->InputAt(0);
@@ -5187,7 +5443,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
Operator::Properties properties = Operator::kEliminatable;
Callable callable = Builtins::CallableFor(isolate(), Builtins::kToObject);
@@ -5220,7 +5476,7 @@ Node* EffectControlLinearizer::LowerConvertReceiver(Node* node) {
__ GotoIf(check, &convert_to_object);
__ Goto(&done_convert, value);
- // Wrap the primitive {value} into a JSValue.
+ // Wrap the primitive {value} into a JSPrimitiveWrapper.
__ Bind(&convert_to_object);
__ GotoIf(__ WordEqual(value, __ UndefinedConstant()),
&convert_global_proxy);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index dc0db4d780..aee0121384 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/escape-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -153,6 +154,7 @@ class VariableTracker {
ZoneVector<Node*> buffer_;
EffectGraphReducer* reducer_;
int next_variable_ = 0;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(VariableTracker);
};
@@ -279,12 +281,14 @@ class EscapeAnalysisTracker : public ZoneObject {
};
EffectGraphReducer::EffectGraphReducer(
- Graph* graph, std::function<void(Node*, Reduction*)> reduce, Zone* zone)
+ Graph* graph, std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone)
: graph_(graph),
state_(graph, kNumStates),
revisit_(zone),
stack_(zone),
- reduce_(std::move(reduce)) {}
+ reduce_(std::move(reduce)),
+ tick_counter_(tick_counter) {}
void EffectGraphReducer::ReduceFrom(Node* node) {
// Perform DFS and eagerly trigger revisitation as soon as possible.
@@ -293,6 +297,7 @@ void EffectGraphReducer::ReduceFrom(Node* node) {
DCHECK(stack_.empty());
stack_.push({node, 0});
while (!stack_.empty()) {
+ tick_counter_->DoTick();
Node* current = stack_.top().node;
int& input_index = stack_.top().input_index;
if (input_index < current->InputCount()) {
@@ -357,7 +362,8 @@ VariableTracker::VariableTracker(JSGraph* graph, EffectGraphReducer* reducer,
graph_(graph),
table_(zone, State(zone)),
buffer_(zone),
- reducer_(reducer) {}
+ reducer_(reducer),
+ tick_counter_(reducer->tick_counter()) {}
VariableTracker::Scope::Scope(VariableTracker* states, Node* node,
Reduction* reduction)
@@ -406,6 +412,7 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
State first_input = table_.Get(NodeProperties::GetEffectInput(effect_phi, 0));
State result = first_input;
for (std::pair<Variable, Node*> var_value : first_input) {
+ tick_counter_->DoTick();
if (Node* value = var_value.second) {
Variable var = var_value.first;
TRACE("var %i:\n", var.id_);
@@ -441,10 +448,12 @@ VariableTracker::State VariableTracker::MergeInputs(Node* effect_phi) {
// [old_value] cannot originate from the inputs. Thus [old_value]
// must have been created by a previous reduction of this [effect_phi].
for (int i = 0; i < arity; ++i) {
- NodeProperties::ReplaceValueInput(
- old_value, buffer_[i] ? buffer_[i] : graph_->Dead(), i);
- // This change cannot affect the rest of the reducer, so there is no
- // need to trigger additional revisitations.
+ Node* old_input = NodeProperties::GetValueInput(old_value, i);
+ Node* new_input = buffer_[i] ? buffer_[i] : graph_->Dead();
+ if (old_input != new_input) {
+ NodeProperties::ReplaceValueInput(old_value, new_input, i);
+ reducer_->Revisit(old_value);
+ }
}
result.Set(var, old_value);
} else {
@@ -701,21 +710,19 @@ void ReduceNode(const Operator* op, EscapeAnalysisTracker::Scope* current,
} else if (right_object && !right_object->HasEscaped()) {
replacement = jsgraph->FalseConstant();
}
- if (replacement) {
- // TODO(tebbi) This is a workaround for uninhabited types. If we
- // replaced a value of uninhabited type with a constant, we would
- // widen the type of the node. This could produce inconsistent
- // types (which might confuse representation selection). We get
- // around this by refusing to constant-fold and escape-analyze
- // if the type is not inhabited.
- if (!NodeProperties::GetType(left).IsNone() &&
- !NodeProperties::GetType(right).IsNone()) {
- current->SetReplacement(replacement);
- } else {
- current->SetEscaped(left);
- current->SetEscaped(right);
- }
+ // TODO(tebbi) This is a workaround for uninhabited types. If we
+ // replaced a value of uninhabited type with a constant, we would
+ // widen the type of the node. This could produce inconsistent
+ // types (which might confuse representation selection). We get
+ // around this by refusing to constant-fold and escape-analyze
+ // if the type is not inhabited.
+ if (replacement && !NodeProperties::GetType(left).IsNone() &&
+ !NodeProperties::GetType(right).IsNone()) {
+ current->SetReplacement(replacement);
+ break;
}
+ current->SetEscaped(left);
+ current->SetEscaped(right);
break;
}
case IrOpcode::kCheckMaps: {
@@ -817,11 +824,12 @@ void EscapeAnalysis::Reduce(Node* node, Reduction* reduction) {
ReduceNode(op, &current, jsgraph());
}
-EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
+EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter,
+ Zone* zone)
: EffectGraphReducer(
jsgraph->graph(),
[this](Node* node, Reduction* reduction) { Reduce(node, reduction); },
- zone),
+ tick_counter, zone),
tracker_(new (zone) EscapeAnalysisTracker(jsgraph, this, zone)),
jsgraph_(jsgraph) {}
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index c3dcd2f74d..0fbc7d0bdd 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -14,6 +14,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class CommonOperatorBuilder;
@@ -38,7 +41,8 @@ class EffectGraphReducer {
};
EffectGraphReducer(Graph* graph,
- std::function<void(Node*, Reduction*)> reduce, Zone* zone);
+ std::function<void(Node*, Reduction*)> reduce,
+ TickCounter* tick_counter, Zone* zone);
void ReduceGraph() { ReduceFrom(graph_->end()); }
@@ -56,6 +60,8 @@ class EffectGraphReducer {
bool Complete() { return stack_.empty() && revisit_.empty(); }
+ TickCounter* tick_counter() const { return tick_counter_; }
+
private:
struct NodeState {
Node* node;
@@ -69,6 +75,7 @@ class EffectGraphReducer {
ZoneStack<Node*> revisit_;
ZoneStack<NodeState> stack_;
std::function<void(Node*, Reduction*)> reduce_;
+ TickCounter* const tick_counter_;
};
// A variable is an abstract storage location, which is lowered to SSA values
@@ -164,7 +171,7 @@ class EscapeAnalysisResult {
class V8_EXPORT_PRIVATE EscapeAnalysis final
: public NON_EXPORTED_BASE(EffectGraphReducer) {
public:
- EscapeAnalysis(JSGraph* jsgraph, Zone* zone);
+ EscapeAnalysis(JSGraph* jsgraph, TickCounter* tick_counter, Zone* zone);
EscapeAnalysisResult analysis_result() {
DCHECK(Complete());
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index cc9dbd9dfd..50f29d968b 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -52,6 +52,9 @@ Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
return jsgraph()->HeapConstant(object);
}
+Node* GraphAssembler::NumberConstant(double value) {
+ return jsgraph()->Constant(value);
+}
Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
return jsgraph()->ExternalConstant(ref);
@@ -221,6 +224,12 @@ Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
current_effect_, current_control_);
}
+Node* GraphAssembler::BitcastTaggedSignedToWord(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastTaggedSignedToWord(), value,
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
return current_effect_ =
graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 74b885b788..e2c0005d15 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -57,6 +57,7 @@ namespace compiler {
V(Word32Shr) \
V(Word32Shl) \
V(Word32Sar) \
+ V(Word64And) \
V(IntAdd) \
V(IntSub) \
V(IntMul) \
@@ -71,6 +72,7 @@ namespace compiler {
V(Uint64LessThan) \
V(Uint64LessThanOrEqual) \
V(Int32LessThan) \
+ V(Int64Sub) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Div) \
@@ -93,22 +95,24 @@ namespace compiler {
V(Uint32Mod) \
V(Uint32Div)
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(HeapNumberMapConstant) \
- V(NoContextConstant) \
- V(EmptyStringConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(FixedArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant)
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(NoContextConstant) \
+ V(EmptyStringConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(FixedArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant)
class GraphAssembler;
@@ -196,6 +200,7 @@ class GraphAssembler {
Node* Float64Constant(double value);
Node* Projection(int index, Node* value);
Node* HeapConstant(Handle<HeapObject> object);
+ Node* NumberConstant(double value);
Node* CEntryStubConstant(int result_size);
Node* ExternalConstant(ExternalReference ref);
@@ -225,6 +230,7 @@ class GraphAssembler {
Node* ToNumber(Node* value);
Node* BitcastWordToTagged(Node* value);
Node* BitcastTaggedToWord(Node* value);
+ Node* BitcastTaggedSignedToWord(Node* value);
Node* Allocate(AllocationType allocation, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index fafa322d87..9a0dea6b26 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -5,10 +5,11 @@
#include <functional>
#include <limits>
-#include "src/compiler/graph.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/node.h"
+#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
namespace v8 {
@@ -25,13 +26,15 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
-GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
+GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
- stack_(zone) {
+ stack_(zone),
+ tick_counter_(tick_counter) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -82,6 +85,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
auto skip = reducers_.end();
for (auto i = reducers_.begin(); i != reducers_.end();) {
if (i != skip) {
+ tick_counter_->DoTick();
Reduction reduction = (*i)->Reduce(node);
if (!reduction.Changed()) {
// No change from this reducer.
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 3bb20a4625..bbcc67b074 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -12,13 +12,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
class Graph;
class Node;
-
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
// out-of-line data associated with each node.
using NodeId = uint32_t;
@@ -129,7 +131,8 @@ class AdvancedReducer : public Reducer {
class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
- GraphReducer(Zone* zone, Graph* graph, Node* dead = nullptr);
+ GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
+ Node* dead = nullptr);
~GraphReducer() override;
Graph* graph() const { return graph_; }
@@ -181,6 +184,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneVector<Reducer*> reducers_;
ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
+ TickCounter* const tick_counter_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
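Both EffectGraphReducer and GraphReducer now follow the same pattern: one DoTick() per unit of work inside the fixpoint loop, so that long-running reductions can be observed or bounded from outside. A generic sketch of that pattern; TickCounter is only assumed here to expose DoTick().

#include <cstddef>
#include <vector>

// Sketch of the ticking pattern the patch threads through the reducers.
class TickCounterSketch {
 public:
  void DoTick() { ++ticks_; }
  size_t ticks() const { return ticks_; }

 private:
  size_t ticks_ = 0;
};

template <typename Node, typename ReduceFn>
void ReduceToFixpoint(std::vector<Node*>* worklist, ReduceFn reduce,
                      TickCounterSketch* tick_counter) {
  while (!worklist->empty()) {
    tick_counter->DoTick();  // one tick per node taken off the worklist
    Node* node = worklist->back();
    worklist->pop_back();
    reduce(node);  // may push further nodes onto the worklist
  }
}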
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
new file mode 100644
index 0000000000..5547039fa6
--- /dev/null
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -0,0 +1,906 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_HEAP_REFS_H_
+#define V8_COMPILER_HEAP_REFS_H_
+
+#include "src/base/optional.h"
+#include "src/ic/call-optimization.h"
+#include "src/objects/elements-kind.h"
+#include "src/objects/feedback-vector.h"
+#include "src/objects/instance-type.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class CallHandlerInfo;
+class FixedDoubleArray;
+class FunctionTemplateInfo;
+class HeapNumber;
+class InternalizedString;
+class JSBoundFunction;
+class JSDataView;
+class JSGlobalProxy;
+class JSRegExp;
+class JSTypedArray;
+class NativeContext;
+class ScriptContextTable;
+class VectorSlotPair;
+
+namespace compiler {
+
+// Whether we are loading a property or storing to a property.
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
+
+enum class OddballType : uint8_t {
+ kNone, // Not an Oddball.
+ kBoolean, // True or False.
+ kUndefined,
+ kNull,
+ kHole,
+ kUninitialized,
+ kOther // Oddball, but none of the above.
+};
+
+// This list is sorted such that subtypes appear before their supertypes.
+// DO NOT VIOLATE THIS PROPERTY!
+#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSBoundFunction) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ V(JSTypedArray) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ V(Context) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ V(Symbol) \
+ /* Subtypes of HeapObject */ \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(CallHandlerInfo) \
+ V(Cell) \
+ V(Code) \
+ V(DescriptorArray) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArrayBase) \
+ V(FunctionTemplateInfo) \
+ V(HeapNumber) \
+ V(JSObject) \
+ V(Map) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(SourceTextModule) \
+ /* Subtypes of Object */ \
+ V(HeapObject)
+
+class CompilationDependencies;
+class JSHeapBroker;
+class ObjectData;
+class PerIsolateCompilerCache;
+class PropertyAccessInfo;
+#define FORWARD_DECL(Name) class Name##Ref;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
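HEAP_BROKER_OBJECT_LIST is used as an X-macro: one list of names expanded several times with different per-name macros (forward declarations here, the Is/As method declarations further down). A minimal sketch of the pattern; Foo and Bar are placeholders, not V8 types.

// Minimal X-macro sketch with made-up names.
#define DEMO_OBJECT_LIST(V) \
  V(Foo)                    \
  V(Bar)

#define DEMO_FORWARD_DECL(Name) class Name##Ref;
DEMO_OBJECT_LIST(DEMO_FORWARD_DECL)  // class FooRef; class BarRef;
#undef DEMO_FORWARD_DECL

class DemoRef {
 public:
#define DEMO_IS_METHOD_DECL(Name) bool Is##Name() const;
  DEMO_OBJECT_LIST(DEMO_IS_METHOD_DECL)  // bool IsFoo() const; bool IsBar() const;
#undef DEMO_IS_METHOD_DECL
};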
+class V8_EXPORT_PRIVATE ObjectRef {
+ public:
+ ObjectRef(JSHeapBroker* broker, Handle<Object> object);
+ ObjectRef(JSHeapBroker* broker, ObjectData* data)
+ : data_(data), broker_(broker) {
+ CHECK_NOT_NULL(data_);
+ }
+
+ Handle<Object> object() const;
+
+ bool equals(const ObjectRef& other) const;
+
+ bool IsSmi() const;
+ int AsSmi() const;
+
+#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+#undef HEAP_IS_METHOD_DECL
+
+#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+#undef HEAP_AS_METHOD_DECL
+
+ bool IsNullOrUndefined() const;
+
+ bool BooleanValue() const;
+ Maybe<double> OddballToNumber() const;
+
+ // Return the element at key {index} if {index} is known to be an own data
+ // property of the object that is non-writable and non-configurable.
+ base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
+ bool serialize = false) const;
+
+ Isolate* isolate() const;
+
+ struct Hash {
+ size_t operator()(const ObjectRef& ref) const {
+ return base::hash_combine(ref.object().address());
+ }
+ };
+ struct Equal {
+ bool operator()(const ObjectRef& lhs, const ObjectRef& rhs) const {
+ return lhs.equals(rhs);
+ }
+ };
+
+ protected:
+ JSHeapBroker* broker() const;
+ ObjectData* data() const;
+ ObjectData* data_; // Should be used only by object() getters.
+
+ private:
+ friend class FunctionTemplateInfoRef;
+ friend class JSArrayData;
+ friend class JSGlobalProxyRef;
+ friend class JSGlobalProxyData;
+ friend class JSObjectData;
+ friend class StringData;
+
+ friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
+
+ JSHeapBroker* broker_;
+};
+
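The nested Hash and Equal functors exist so that ObjectRef can serve as a key in unordered containers. A usage sketch; the alias name is ours, not part of the patch, and it assumes a V8 build that can include the new header.

#include <unordered_set>

#include "src/compiler/heap-refs.h"

// Hypothetical alias showing how ObjectRef::Hash and ObjectRef::Equal plug
// into a standard container.
using ObjectRefSet =
    std::unordered_set<v8::internal::compiler::ObjectRef,
                       v8::internal::compiler::ObjectRef::Hash,
                       v8::internal::compiler::ObjectRef::Equal>;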
+// Temporary class that carries information from a Map. We'd like to remove
+// this class and use MapRef instead, but we can't as long as we support the
+// kDisabled broker mode. That's because obtaining the MapRef via
+// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
+// During OptimizeGraph we generally don't have a HandleScope, however. There
+// are two places where we therefore use GetHeapObjectType() instead. Both that
+// function and this class should eventually be removed.
+class HeapObjectType {
+ public:
+ enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
+
+ using Flags = base::Flags<Flag>;
+
+ HeapObjectType(InstanceType instance_type, Flags flags,
+ OddballType oddball_type)
+ : instance_type_(instance_type),
+ oddball_type_(oddball_type),
+ flags_(flags) {
+ DCHECK_EQ(instance_type == ODDBALL_TYPE,
+ oddball_type != OddballType::kNone);
+ }
+
+ OddballType oddball_type() const { return oddball_type_; }
+ InstanceType instance_type() const { return instance_type_; }
+ Flags flags() const { return flags_; }
+
+ bool is_callable() const { return flags_ & kCallable; }
+ bool is_undetectable() const { return flags_ & kUndetectable; }
+
+ private:
+ InstanceType const instance_type_;
+ OddballType const oddball_type_;
+ Flags const flags_;
+};
+
+class HeapObjectRef : public ObjectRef {
+ public:
+ using ObjectRef::ObjectRef;
+ Handle<HeapObject> object() const;
+
+ MapRef map() const;
+
+ // See the comment on the HeapObjectType class.
+ HeapObjectType GetHeapObjectType() const;
+};
+
+class PropertyCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<PropertyCell> object() const;
+
+ PropertyDetails property_details() const;
+
+ void Serialize();
+ ObjectRef value() const;
+};
+
+class JSObjectRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<JSObject> object() const;
+
+ uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
+ double RawFastDoublePropertyAt(FieldIndex index) const;
+ ObjectRef RawFastPropertyAt(FieldIndex index) const;
+
+ // Return the value of the property identified by the field {index}
+ // if {index} is known to be an own data property of the object.
+ base::Optional<ObjectRef> GetOwnProperty(Representation field_representation,
+ FieldIndex index,
+ bool serialize = false) const;
+
+ FixedArrayBaseRef elements() const;
+ void SerializeElements();
+ void EnsureElementsTenured();
+ ElementsKind GetElementsKind() const;
+
+ void SerializeObjectCreateMap();
+ base::Optional<MapRef> GetObjectCreateMap() const;
+};
+
+class JSDataViewRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSDataView> object() const;
+
+ size_t byte_length() const;
+ size_t byte_offset() const;
+};
+
+class JSBoundFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSBoundFunction> object() const;
+
+ void Serialize();
+
+ // The following are available only after calling Serialize().
+ ObjectRef bound_target_function() const;
+ ObjectRef bound_this() const;
+ FixedArrayRef bound_arguments() const;
+};
+
+class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSFunction> object() const;
+
+ bool has_feedback_vector() const;
+ bool has_initial_map() const;
+ bool has_prototype() const;
+ bool PrototypeRequiresRuntimeLookup() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ // The following are available only after calling Serialize().
+ ObjectRef prototype() const;
+ MapRef initial_map() const;
+ ContextRef context() const;
+ NativeContextRef native_context() const;
+ SharedFunctionInfoRef shared() const;
+ FeedbackVectorRef feedback_vector() const;
+ int InitialMapInstanceSizeWithMinSlack() const;
+
+ bool IsSerializedForCompilation() const;
+};
+
+class JSRegExpRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSRegExp> object() const;
+
+ ObjectRef raw_properties_or_hash() const;
+ ObjectRef data() const;
+ ObjectRef source() const;
+ ObjectRef flags() const;
+ ObjectRef last_index() const;
+};
+
+class HeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<HeapNumber> object() const;
+
+ double value() const;
+};
+
+class MutableHeapNumberRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<MutableHeapNumber> object() const;
+
+ double value() const;
+};
+
+class ContextRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Context> object() const;
+
+ // {previous} decrements {depth} by 1 for each previous link successfully
+ // followed. If {depth} != 0 on function return, then it only got
+ // partway to the desired depth. If {serialize} is true, then
+ // {previous} will cache its findings.
+ ContextRef previous(size_t* depth, bool serialize = false) const;
+
+ // Only returns a value if the index is valid for this ContextRef.
+ base::Optional<ObjectRef> get(int index, bool serialize = false) const;
+
+ // We only serialize the ScopeInfo if certain Promise
+ // builtins are called.
+ void SerializeScopeInfo();
+ base::Optional<ScopeInfoRef> scope_info() const;
+};
+
+#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ V(JSFunction, array_function) \
+ V(JSFunction, boolean_function) \
+ V(JSFunction, bigint_function) \
+ V(JSFunction, number_function) \
+ V(JSFunction, object_function) \
+ V(JSFunction, promise_function) \
+ V(JSFunction, promise_then) \
+ V(JSFunction, string_function) \
+ V(JSFunction, symbol_function) \
+ V(JSGlobalProxy, global_proxy_object) \
+ V(JSObject, promise_prototype) \
+ V(Map, bound_function_with_constructor_map) \
+ V(Map, bound_function_without_constructor_map) \
+ V(Map, fast_aliased_arguments_map) \
+ V(Map, initial_array_iterator_map) \
+ V(Map, initial_string_iterator_map) \
+ V(Map, iterator_result_map) \
+ V(Map, js_array_holey_double_elements_map) \
+ V(Map, js_array_holey_elements_map) \
+ V(Map, js_array_holey_smi_elements_map) \
+ V(Map, js_array_packed_double_elements_map) \
+ V(Map, js_array_packed_elements_map) \
+ V(Map, js_array_packed_smi_elements_map) \
+ V(Map, sloppy_arguments_map) \
+ V(Map, slow_object_with_null_prototype_map) \
+ V(Map, strict_arguments_map) \
+ V(ScriptContextTable, script_context_table) \
+ V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
+ V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_then_finally_shared_fun) \
+ V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
+
+// These are set by Bootstrapper::ExportFromRuntime, which may not yet have
+// happened when TurboFan is invoked via --always-opt.
+#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
+ V(Map, async_function_object_map) \
+ V(Map, map_key_iterator_map) \
+ V(Map, map_key_value_iterator_map) \
+ V(Map, map_value_iterator_map) \
+ V(JSFunction, regexp_exec_function) \
+ V(Map, set_key_value_iterator_map) \
+ V(Map, set_value_iterator_map)
+
+#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
+
+class NativeContextRef : public ContextRef {
+ public:
+ using ContextRef::ContextRef;
+ Handle<NativeContext> object() const;
+
+ void Serialize();
+
+#define DECL_ACCESSOR(type, name) type##Ref name() const;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ ScopeInfoRef scope_info() const;
+ MapRef GetFunctionMapFromIndex(int index) const;
+ MapRef GetInitialJSArrayMap(ElementsKind kind) const;
+ base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
+};
+
+class NameRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Name> object() const;
+
+ bool IsUniqueName() const;
+};
+
+class ScriptContextTableRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScriptContextTable> object() const;
+
+ struct LookupResult {
+ ContextRef context;
+ bool immutable;
+ int index;
+ };
+
+ base::Optional<LookupResult> lookup(const NameRef& name) const;
+};
+
+class DescriptorArrayRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<DescriptorArray> object() const;
+};
+
+class FeedbackCellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackCell> object() const;
+
+ HeapObjectRef value() const;
+};
+
+class FeedbackVectorRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FeedbackVector> object() const;
+
+ ObjectRef get(FeedbackSlot slot) const;
+
+ void SerializeSlots();
+};
+
+class CallHandlerInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<CallHandlerInfo> object() const;
+
+ Address callback() const;
+
+ void Serialize();
+ ObjectRef data() const;
+};
+
+class AllocationSiteRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<AllocationSite> object() const;
+
+ bool PointsToLiteral() const;
+ AllocationType GetAllocationType() const;
+ ObjectRef nested_site() const;
+
+ // {IsFastLiteral} determines whether the given array or object literal
+ // boilerplate satisfies all limits to be considered for fast deep-copying
+ // and computes the total size of all objects that are part of the graph.
+ //
+ // If PointsToLiteral() is false, then IsFastLiteral() is also false.
+ bool IsFastLiteral() const;
+ // We only serialize boilerplate if IsFastLiteral is true.
+ base::Optional<JSObjectRef> boilerplate() const;
+
+ ElementsKind GetElementsKind() const;
+ bool CanInlineCall() const;
+};
+
+class BigIntRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<BigInt> object() const;
+
+ uint64_t AsUint64() const;
+};
+
+class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Map> object() const;
+
+ int instance_size() const;
+ InstanceType instance_type() const;
+ int GetInObjectProperties() const;
+ int GetInObjectPropertiesStartInWords() const;
+ int NumberOfOwnDescriptors() const;
+ int GetInObjectPropertyOffset(int index) const;
+ int constructor_function_index() const;
+ int NextFreePropertyIndex() const;
+ int UnusedPropertyFields() const;
+ ElementsKind elements_kind() const;
+ bool is_stable() const;
+ bool is_extensible() const;
+ bool is_constructor() const;
+ bool has_prototype_slot() const;
+ bool is_access_check_needed() const;
+ bool is_deprecated() const;
+ bool CanBeDeprecated() const;
+ bool CanTransition() const;
+ bool IsInobjectSlackTrackingInProgress() const;
+ bool is_dictionary_map() const;
+ bool IsFixedCowArrayMap() const;
+ bool IsPrimitiveMap() const;
+ bool is_undetectable() const;
+ bool is_callable() const;
+ bool has_indexed_interceptor() const;
+ bool is_migration_target() const;
+ bool supports_fast_array_iteration() const;
+ bool supports_fast_array_resize() const;
+ bool IsMapOfCurrentGlobalProxy() const;
+
+ OddballType oddball_type() const;
+
+#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
+ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
+ void SerializeBackPointer();
+ HeapObjectRef GetBackPointer() const;
+
+ void SerializePrototype();
+ bool serialized_prototype() const;
+ HeapObjectRef prototype() const;
+
+ void SerializeForElementLoad();
+
+ void SerializeForElementStore();
+ bool HasOnlyStablePrototypesWithFastElements(
+ ZoneVector<MapRef>* prototype_maps);
+
+ // Concerning the underlying instance_descriptors:
+ void SerializeOwnDescriptors();
+ void SerializeOwnDescriptor(int descriptor_index);
+ MapRef FindFieldOwner(int descriptor_index) const;
+ PropertyDetails GetPropertyDetails(int descriptor_index) const;
+ NameRef GetPropertyKey(int descriptor_index) const;
+ FieldIndex GetFieldIndexFor(int descriptor_index) const;
+ ObjectRef GetFieldType(int descriptor_index) const;
+ bool IsUnboxedDoubleField(int descriptor_index) const;
+
+ // Available after calling JSFunctionRef::Serialize on a function that has
+ // this map as initial map.
+ ObjectRef GetConstructor() const;
+ base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
+};
+
+struct HolderLookupResult {
+ HolderLookupResult(CallOptimization::HolderLookup lookup_ =
+ CallOptimization::kHolderNotFound,
+ base::Optional<JSObjectRef> holder_ = base::nullopt)
+ : lookup(lookup_), holder(holder_) {}
+ CallOptimization::HolderLookup lookup;
+ base::Optional<JSObjectRef> holder;
+};
+
+class FunctionTemplateInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FunctionTemplateInfo> object() const;
+
+ bool is_signature_undefined() const;
+ bool accept_any_receiver() const;
+ // The following returns true if the CallHandlerInfo is present.
+ bool has_call_code() const;
+
+ void SerializeCallCode();
+ base::Optional<CallHandlerInfoRef> call_code() const;
+
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map,
+ bool serialize);
+};
+
+class FixedArrayBaseRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<FixedArrayBase> object() const;
+
+ int length() const;
+};
+
+class FixedArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedArray> object() const;
+
+ ObjectRef get(int i) const;
+};
+
+class FixedDoubleArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<FixedDoubleArray> object() const;
+
+ double get_scalar(int i) const;
+ bool is_the_hole(int i) const;
+};
+
+class BytecodeArrayRef : public FixedArrayBaseRef {
+ public:
+ using FixedArrayBaseRef::FixedArrayBaseRef;
+ Handle<BytecodeArray> object() const;
+
+ int register_count() const;
+ int parameter_count() const;
+ interpreter::Register incoming_new_target_or_generator_register() const;
+
+ // Bytecode access methods.
+ uint8_t get(int index) const;
+ Address GetFirstBytecodeAddress() const;
+
+ // Source position table.
+ const byte* source_positions_address() const;
+ int source_positions_size() const;
+
+ // Constant pool access.
+ Handle<Object> GetConstantAtIndex(int index) const;
+ bool IsConstantAtIndexSmi(int index) const;
+ Smi GetConstantAtIndexAsSmi(int index) const;
+
+ // Exception handler table.
+ Address handler_table_address() const;
+ int handler_table_size() const;
+
+ bool IsSerializedForCompilation() const;
+ void SerializeForCompilation();
+};
+
+class JSArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSArray> object() const;
+
+ ObjectRef length() const;
+
+ // Return the element at key {index} if the array has a copy-on-write elements
+ // storage and {index} is known to be an own data property.
+ base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
+ bool serialize = false) const;
+};
+
+class ScopeInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<ScopeInfo> object() const;
+
+ int ContextLength() const;
+};
+
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(bool, is_safe_to_skip_arguments_adaptor) \
+ V(bool, IsInlineable) \
+ V(int, StartPosition) \
+ V(bool, is_compiled)
+
+class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SharedFunctionInfo> object() const;
+
+ int builtin_id() const;
+ BytecodeArrayRef GetBytecodeArray() const;
+
+#define DECL_ACCESSOR(type, name) type name() const;
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SetSerializedForCompilation(FeedbackVectorRef feedback);
+
+ // Template objects may not be created at compilation time. This method
+ // wraps the retrieval of the template object and creates it if
+ // necessary.
+ JSArrayRef GetTemplateObject(ObjectRef description, FeedbackVectorRef vector,
+ FeedbackSlot slot, bool serialize = false);
+
+ void SerializeFunctionTemplateInfo();
+ base::Optional<FunctionTemplateInfoRef> function_template_info() const;
+};
+
+class StringRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<String> object() const;
+
+ int length() const;
+ uint16_t GetFirstChar();
+ base::Optional<double> ToNumber();
+ bool IsSeqString() const;
+ bool IsExternalString() const;
+};
+
+class SymbolRef : public NameRef {
+ public:
+ using NameRef::NameRef;
+ Handle<Symbol> object() const;
+};
+
+class JSTypedArrayRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSTypedArray> object() const;
+
+ bool is_on_heap() const;
+ size_t length() const;
+ void* external_pointer() const;
+
+ void Serialize();
+ bool serialized() const;
+
+ HeapObjectRef buffer() const;
+};
+
+class SourceTextModuleRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<SourceTextModule> object() const;
+
+ void Serialize();
+
+ CellRef GetCell(int cell_index) const;
+};
+
+class CellRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Cell> object() const;
+
+ ObjectRef value() const;
+};
+
+class JSGlobalProxyRef : public JSObjectRef {
+ public:
+ using JSObjectRef::JSObjectRef;
+ Handle<JSGlobalProxy> object() const;
+
+ // If {serialize} is false:
+ // If the property is known to exist as a property cell (on the global
+ // object), return that property cell. Otherwise (not known to exist as a
+ // property cell or known not to exist as a property cell) return nothing.
+ // If {serialize} is true:
+ // Like above but potentially access the heap and serialize the necessary
+ // information.
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
+ bool serialize = false) const;
+};
+
+class CodeRef : public HeapObjectRef {
+ public:
+ using HeapObjectRef::HeapObjectRef;
+ Handle<Code> object() const;
+};
+
+class InternalizedStringRef : public StringRef {
+ public:
+ using StringRef::StringRef;
+ Handle<InternalizedString> object() const;
+};
+
+class ElementAccessFeedback;
+class NamedAccessFeedback;
+
+class ProcessedFeedback : public ZoneObject {
+ public:
+ enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
+ Kind kind() const { return kind_; }
+
+ ElementAccessFeedback const* AsElementAccess() const;
+ NamedAccessFeedback const* AsNamedAccess() const;
+
+ protected:
+ explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
+
+ private:
+ Kind const kind_;
+};
+
+class InsufficientFeedback final : public ProcessedFeedback {
+ public:
+ InsufficientFeedback();
+};
+
+class GlobalAccessFeedback : public ProcessedFeedback {
+ public:
+ explicit GlobalAccessFeedback(PropertyCellRef cell);
+ GlobalAccessFeedback(ContextRef script_context, int slot_index,
+ bool immutable);
+
+ bool IsPropertyCell() const;
+ PropertyCellRef property_cell() const;
+
+ bool IsScriptContextSlot() const { return !IsPropertyCell(); }
+ ContextRef script_context() const;
+ int slot_index() const;
+ bool immutable() const;
+
+ base::Optional<ObjectRef> GetConstantHint() const;
+
+ private:
+ ObjectRef const cell_or_context_;
+ int const index_and_immutable_;
+};
+
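GlobalAccessFeedback stores the script-context slot index and the immutability flag in the single field index_and_immutable_, which suggests simple bit packing. The layout below is an assumption for illustration only, not taken from the patch.

// Assumed encoding: lowest bit = immutable flag, remaining bits = slot index.
inline int PackIndexAndImmutable(int slot_index, bool immutable) {
  return (slot_index << 1) | (immutable ? 1 : 0);
}

inline int UnpackSlotIndex(int packed) { return packed >> 1; }
inline bool UnpackImmutable(int packed) { return (packed & 1) != 0; }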
+class KeyedAccessMode {
+ public:
+ static KeyedAccessMode FromNexus(FeedbackNexus const& nexus);
+
+ AccessMode access_mode() const;
+ bool IsLoad() const;
+ bool IsStore() const;
+ KeyedAccessLoadMode load_mode() const;
+ KeyedAccessStoreMode store_mode() const;
+
+ private:
+ AccessMode const access_mode_;
+ union LoadStoreMode {
+ LoadStoreMode(KeyedAccessLoadMode load_mode);
+ LoadStoreMode(KeyedAccessStoreMode store_mode);
+ KeyedAccessLoadMode load_mode;
+ KeyedAccessStoreMode store_mode;
+ } const load_store_mode_;
+
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessLoadMode load_mode);
+ KeyedAccessMode(AccessMode access_mode, KeyedAccessStoreMode store_mode);
+};
+
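KeyedAccessMode keeps either a load mode or a store mode in one union, discriminated by access_mode_. A generic sketch of the same tagged-union idea; the Demo enumerators are placeholders, not the real KeyedAccessLoadMode/KeyedAccessStoreMode values.

// Tagged-union sketch mirroring KeyedAccessMode's LoadStoreMode member.
enum class DemoAccess { kLoad, kStore };
enum class DemoLoadMode { kStandard, kHandleOutOfBounds };
enum class DemoStoreMode { kStandard, kGrowAndHandleCopyOnWrite };

struct DemoKeyedMode {
  DemoAccess access;
  union {
    DemoLoadMode load;    // valid when access == DemoAccess::kLoad
    DemoStoreMode store;  // valid when access == DemoAccess::kStore
  };
};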
+class ElementAccessFeedback : public ProcessedFeedback {
+ public:
+ ElementAccessFeedback(Zone* zone, KeyedAccessMode const& keyed_mode);
+
+ // No transition sources appear in {receiver_maps}.
+ // All transition targets appear in {receiver_maps}.
+ ZoneVector<Handle<Map>> receiver_maps;
+ ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
+
+ KeyedAccessMode const keyed_mode;
+
+ class MapIterator {
+ public:
+ bool done() const;
+ void advance();
+ MapRef current() const;
+
+ private:
+ friend class ElementAccessFeedback;
+
+ explicit MapIterator(ElementAccessFeedback const& processed,
+ JSHeapBroker* broker);
+
+ ElementAccessFeedback const& processed_;
+ JSHeapBroker* const broker_;
+ size_t index_ = 0;
+ };
+
+ // Iterator over all maps: first {receiver_maps}, then transition sources.
+ MapIterator all_maps(JSHeapBroker* broker) const;
+};
+
+class NamedAccessFeedback : public ProcessedFeedback {
+ public:
+ NamedAccessFeedback(NameRef const& name,
+ ZoneVector<PropertyAccessInfo> const& access_infos);
+
+ NameRef const& name() const { return name_; }
+ ZoneVector<PropertyAccessInfo> const& access_infos() const {
+ return access_infos_;
+ }
+
+ private:
+ NameRef const name_;
+ ZoneVector<PropertyAccessInfo> const access_infos_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_HEAP_REFS_H_
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 3430b6b339..eda866e5f2 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -97,7 +97,10 @@ int GetReturnCountAfterLowering(CallDescriptor* call_descriptor) {
int GetParameterIndexAfterLowering(
Signature<MachineRepresentation>* signature, int old_index) {
int result = old_index;
- for (int i = 0; i < old_index; i++) {
+  // Be robust against special indexes (>= param count).
+ int max_to_check =
+ std::min(old_index, static_cast<int>(signature->parameter_count()));
+ for (int i = 0; i < max_to_check; i++) {
if (signature->GetParam(i) == MachineRepresentation::kWord64) {
result++;
}
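Each kWord64 parameter before old_index turns into two word32 parameters after lowering, so the new index is old_index plus the number of earlier 64-bit parameters; the added std::min clamp keeps special indexes (instance, JS context/closure) from reading past the signature. A standalone sketch of the same counting, with is_word64 standing in for the signature:

#include <algorithm>
#include <vector>

// Sketch: is_word64[i] says whether parameter i has kWord64 representation.
int LoweredParameterIndexSketch(const std::vector<bool>& is_word64,
                                int old_index) {
  int result = old_index;
  int max_to_check =
      std::min(old_index, static_cast<int>(is_word64.size()));
  for (int i = 0; i < max_to_check; ++i) {
    if (is_word64[i]) result++;  // each earlier 64-bit param adds one slot
  }
  return result;
}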
@@ -142,16 +145,16 @@ int Int64Lowering::GetParameterCountAfterLowering(
signature, static_cast<int>(signature->parameter_count()));
}
-void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
- Node*& index_high) {
+void Int64Lowering::GetIndexNodes(Node* index, Node** index_low,
+ Node** index_high) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- index_low = index;
- index_high = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
+ *index_low = index;
+ *index_high = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
#elif defined(V8_TARGET_BIG_ENDIAN)
- index_low = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(4)));
- index_high = index;
+ *index_low = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+ *index_high = index;
#endif
}
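GetIndexNodes computes the addresses of the two 32-bit halves of a 64-bit memory access: on little-endian targets the low word sits at the original index and the high word four bytes later; big-endian targets swap the two. A plain C++ sketch of the same addressing; V8_TARGET_BIG_ENDIAN mirrors the macro used above and is normally undefined outside a V8 build.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch only: read a 64-bit value as the two 32-bit words the lowered loads
// address.
void LoadInt64AsTwoWords(const uint8_t* base, size_t index, uint32_t* low,
                         uint32_t* high) {
#if defined(V8_TARGET_BIG_ENDIAN)
  std::memcpy(high, base + index, sizeof(uint32_t));
  std::memcpy(low, base + index + 4, sizeof(uint32_t));
#else
  std::memcpy(low, base + index, sizeof(uint32_t));
  std::memcpy(high, base + index + 4, sizeof(uint32_t));
#endif
}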
@@ -182,7 +185,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
const Operator* load_op;
if (node->opcode() == IrOpcode::kLoad) {
@@ -232,7 +235,7 @@ void Int64Lowering::LowerNode(Node* node) {
Node* index = node->InputAt(1);
Node* index_low;
Node* index_high;
- GetIndexNodes(index, index_low, index_high);
+ GetIndexNodes(index, &index_low, &index_high);
Node* value = node->InputAt(2);
DCHECK(HasReplacementLow(value));
DCHECK(HasReplacementHigh(value));
@@ -291,12 +294,6 @@ void Int64Lowering::LowerNode(Node* node) {
// changes.
if (GetParameterCountAfterLowering(signature()) != param_count) {
int old_index = ParameterIndexOf(node->op());
- // Prevent special lowering of wasm's instance or JS
- // context/closure parameters.
- if (old_index <= 0 || old_index > param_count) {
- DefaultLowering(node);
- break;
- }
// Adjust old_index to be compliant with the signature.
--old_index;
int new_index = GetParameterIndexAfterLowering(signature(), old_index);
@@ -304,6 +301,12 @@ void Int64Lowering::LowerNode(Node* node) {
++new_index;
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+ if (old_index < 0 || old_index >= param_count) {
+ // Special parameters (JS closure/context) don't have kWord64
+ // representation anyway.
+ break;
+ }
+
if (signature()->GetParam(old_index) ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(common()->Parameter(new_index + 1),
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
index b083805771..9c77cf41a3 100644
--- a/deps/v8/src/compiler/int64-lowering.h
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -59,7 +59,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
bool HasReplacementHigh(Node* node);
Node* GetReplacementHigh(Node* node);
void PreparePhiReplacement(Node* phi);
- void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
+ void GetIndexNodes(Node* index, Node** index_low, Node** index_high);
void ReplaceNodeWithProjections(Node* node);
void LowerMemoryBaseAndIndex(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index d58331c85e..8128f89949 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -179,6 +179,100 @@ Reduction JSCallReducer::ReduceMathMinMax(Node* node, const Operator* op,
return Replace(value);
}
+// ES section #sec-math.hypot Math.hypot ( value1, value2, ...values )
+Reduction JSCallReducer::ReduceMathHypot(Node* node) {
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ Node* value = jsgraph()->ZeroConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ NodeVector values(graph()->zone());
+
+ Node* max = effect =
+ graph()->NewNode(simplified()->SpeculativeToNumber(
+ NumberOperationHint::kNumberOrOddball, p.feedback()),
+ NodeProperties::GetValueInput(node, 2), effect, control);
+ max = graph()->NewNode(simplified()->NumberAbs(), max);
+ values.push_back(max);
+ for (int i = 3; i < node->op()->ValueInputCount(); ++i) {
+ Node* input = effect = graph()->NewNode(
+ simplified()->SpeculativeToNumber(NumberOperationHint::kNumberOrOddball,
+ p.feedback()),
+ NodeProperties::GetValueInput(node, i), effect, control);
+ input = graph()->NewNode(simplified()->NumberAbs(), input);
+ values.push_back(input);
+
+ // Make sure {max} is NaN in the end in case any argument was NaN.
+ max = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged),
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), input, max),
+ max, input);
+ }
+
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* vtrue0 = jsgraph()->ZeroConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* vfalse0;
+ {
+ Node* check1 = graph()->NewNode(simplified()->NumberEqual(), max,
+ jsgraph()->Constant(V8_INFINITY));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 = jsgraph()->Constant(V8_INFINITY);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1;
+ {
+ // Kahan summation to avoid rounding errors.
+ // Normalize the numbers to the largest one to avoid overflow.
+ Node* sum = jsgraph()->ZeroConstant();
+ Node* compensation = jsgraph()->ZeroConstant();
+ for (Node* value : values) {
+ Node* n = graph()->NewNode(simplified()->NumberDivide(), value, max);
+ Node* summand = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberMultiply(), n, n),
+ compensation);
+ Node* preliminary =
+ graph()->NewNode(simplified()->NumberAdd(), sum, summand);
+ compensation = graph()->NewNode(
+ simplified()->NumberSubtract(),
+ graph()->NewNode(simplified()->NumberSubtract(), preliminary, sum),
+ summand);
+ sum = preliminary;
+ }
+ vfalse1 = graph()->NewNode(
+ simplified()->NumberMultiply(),
+ graph()->NewNode(simplified()->NumberSqrt(), sum), max);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
+ vfalse0, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
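ReduceMathHypot builds this computation as graph nodes; written as scalar C++ the same normalize-then-Kahan-sum scheme looks roughly like the sketch below. NaN propagation, which the reducer handles through the select over max, is omitted here.

#include <algorithm>
#include <cmath>
#include <vector>

// Scalar sketch of the lowering: normalize by the largest magnitude, sum the
// squared ratios with Kahan compensation, then scale back by the maximum.
double HypotSketch(const std::vector<double>& values) {
  if (values.empty()) return 0.0;
  double max = 0.0;
  for (double v : values) max = std::max(max, std::abs(v));
  if (max == 0.0) return 0.0;            // corresponds to check0 above
  if (std::isinf(max)) return INFINITY;  // corresponds to check1 above
  double sum = 0.0, compensation = 0.0;
  for (double v : values) {
    double n = std::abs(v) / max;
    double summand = n * n - compensation;
    double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;
    sum = preliminary;
  }
  return std::sqrt(sum) * max;
}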
Reduction JSCallReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSConstruct:
@@ -274,6 +368,8 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
// ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
size_t arity = p.arity();
@@ -381,9 +477,17 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
}
// Change {node} to the new {JSCall} operator.
+ // TODO(mslekova): Since this introduces a Call that will get optimized by
+ // the JSCallReducer, we basically might have to do all the serialization
+ // that we do for that here as well. The only difference is that here we
+ // disable speculation (cf. the empty VectorSlotPair above), causing the
+ // JSCallReducer to do much less work. We should revisit this later.
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -496,6 +600,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
// ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
Node* target = NodeProperties::GetValueInput(node, 0);
@@ -508,6 +614,10 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasValue()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ if (FLAG_concurrent_inlining && !function.serialized()) {
+ TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
+ return NoChange();
+ }
context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
@@ -537,6 +647,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
NodeProperties::ChangeOp(
node,
javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode));
+ // TODO(mslekova): Remove once ReduceJSCall is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
// Try to further reduce the JSCall {node}.
Reduction const reduction = ReduceJSCall(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -588,7 +701,6 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
MapRef object_map(broker(), object_maps[i]);
object_map.SerializePrototype();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- object_map.has_hidden_prototype() ||
!object_map.prototype().equals(candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
@@ -1002,27 +1114,28 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
return true;
}
-bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
- MapHandles const& receiver_maps,
- ElementsKind* kind_return,
- bool builtin_is_push = false) {
+bool CanInlineArrayResizingBuiltin(
+ JSHeapBroker* broker, MapHandles const& receiver_maps,
+ std::vector<ElementsKind>& kinds, // NOLINT(runtime/references)
+ bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
- *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
for (auto receiver_map : receiver_maps) {
MapRef map(broker, receiver_map);
if (!map.supports_fast_array_resize()) return false;
- if (builtin_is_push) {
- if (!UnionElementsKindUptoPackedness(kind_return, map.elements_kind())) {
- return false;
- }
- } else {
- // TODO(turbofan): We should also handle fast holey double elements once
- // we got the hole NaN mess sorted out in TurboFan/V8.
- if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS ||
- !UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
- return false;
+    // TODO(turbofan): We should also handle fast holey double elements once
+    // we have the hole NaN mess sorted out in TurboFan/V8.
+ if (map.elements_kind() == HOLEY_DOUBLE_ELEMENTS && !builtin_is_push) {
+ return false;
+ }
+ ElementsKind current_kind = map.elements_kind();
+ auto kind_ptr = kinds.data();
+ size_t i;
+ for (i = 0; i < kinds.size(); i++, kind_ptr++) {
+ if (UnionElementsKindUptoPackedness(kind_ptr, current_kind)) {
+ break;
}
}
+ if (i == kinds.size()) kinds.push_back(current_kind);
}
return true;
}
@@ -2735,6 +2848,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
Reduction JSCallReducer::ReduceCallApiFunction(
Node* node, const SharedFunctionInfoRef& shared) {
+  DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int const argc = static_cast<int>(p.arity()) - 2;
@@ -2750,78 +2865,21 @@ Reduction JSCallReducer::ReduceCallApiFunction(
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
- // See if we can optimize this API call to {shared}.
- Handle<FunctionTemplateInfo> function_template_info(
- FunctionTemplateInfo::cast(shared.object()->function_data()), isolate());
- CallOptimization call_optimization(isolate(), function_template_info);
- if (!call_optimization.is_simple_api_call()) return NoChange();
-
- // Try to infer the {receiver} maps from the graph.
- MapInference inference(broker(), receiver, effect);
- if (inference.HaveMaps()) {
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check that all {receiver_maps} are actually JSReceiver maps and
- // that the {function_template_info} accepts them without access
- // checks (even if "access check needed" is set for {receiver}).
- //
- // Note that we don't need to know the concrete {receiver} maps here,
- // meaning it's fine if the {receiver_maps} are unreliable, and we also
- // don't need to install any stability dependencies, since the only
- // relevant information regarding the {receiver} is the Map::constructor
- // field on the root map (which is different from the JavaScript exposed
- // "constructor" property) and that field cannot change.
- //
- // So if we know that {receiver} had a certain constructor at some point
- // in the past (i.e. it had a certain map), then this constructor is going
- // to be the same later, since this information cannot change with map
- // transitions.
- //
- // The same is true for the instance type, e.g. we still know that the
- // instance type is JSObject even if that information is unreliable, and
- // the "access check needed" bit, which also cannot change later.
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !function_template_info->accept_any_receiver())) {
- return inference.NoChange();
- }
- }
-
- // See if we can constant-fold the compatible receiver checks.
- CallOptimization::HolderLookup lookup;
- Handle<JSObject> api_holder =
- call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
- if (lookup == CallOptimization::kHolderNotFound)
- return inference.NoChange();
- for (size_t i = 1; i < receiver_maps.size(); ++i) {
- CallOptimization::HolderLookup lookupi;
- Handle<JSObject> holderi = call_optimization.LookupHolderOfExpectedType(
- receiver_maps[i], &lookupi);
- if (lookup != lookupi) return inference.NoChange();
- if (!api_holder.is_identical_to(holderi)) return inference.NoChange();
- }
+ if (!shared.function_template_info().has_value()) {
+ TRACE_BROKER_MISSING(
+ broker(), "FunctionTemplateInfo for function with SFI " << shared);
+ return NoChange();
+ }
- if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
- !inference.RelyOnMapsViaStability(dependencies())) {
- // We were not able to make the receiver maps reliable without map checks
- // but doing map checks would lead to deopt loops, so give up.
- return inference.NoChange();
- }
+ // See if we can optimize this API call to {shared}.
+ FunctionTemplateInfoRef function_template_info(
+ shared.function_template_info().value());
- // TODO(neis): The maps were used in a way that does not actually require
- // map checks or stability dependencies.
- inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
- control, p.feedback());
+ if (!function_template_info.has_call_code()) return NoChange();
- // Determine the appropriate holder for the {lookup}.
- holder = lookup == CallOptimization::kHolderFound
- ? jsgraph()->HeapConstant(api_holder)
- : receiver;
- } else if (function_template_info->accept_any_receiver() &&
- function_template_info->signature().IsUndefined(isolate())) {
- // We haven't found any {receiver_maps}, but we might still be able to
+ if (function_template_info.accept_any_receiver() &&
+ function_template_info.is_signature_undefined()) {
+ // We might be able to
// optimize the API call depending on the {function_template_info}.
// If the API function accepts any kind of {receiver}, we only need to
// ensure that the {receiver} is actually a JSReceiver at this point,
@@ -2840,51 +2898,127 @@ Reduction JSCallReducer::ReduceCallApiFunction(
graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
receiver, global_proxy, effect, control);
} else {
- // We don't have enough information to eliminate the access check
- // and/or the compatible receiver check, so use the generic builtin
- // that does those checks dynamically. This is still significantly
- // faster than the generic call sequence.
- Builtins::Name builtin_name =
- !function_template_info->accept_any_receiver()
- ? (function_template_info->signature().IsUndefined(isolate())
- ? Builtins::kCallFunctionTemplate_CheckAccess
- : Builtins::
- kCallFunctionTemplate_CheckAccessAndCompatibleReceiver)
- : Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
-
- // The CallFunctionTemplate builtin requires the {receiver} to be
- // an actual JSReceiver, so make sure we do the proper conversion
- // first if necessary.
- receiver = holder = effect =
- graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
- receiver, global_proxy, effect, control);
+ // Try to infer the {receiver} maps from the graph.
+ MapInference inference(broker(), receiver, effect);
+ if (inference.HaveMaps()) {
+ MapHandles const& receiver_maps = inference.GetMaps();
+ MapRef first_receiver_map(broker(), receiver_maps[0]);
+
+ // See if we can constant-fold the compatible receiver checks.
+ HolderLookupResult api_holder =
+ function_template_info.LookupHolderOfExpectedType(first_receiver_map,
+ false);
+ if (api_holder.lookup == CallOptimization::kHolderNotFound)
+ return inference.NoChange();
- Callable callable = Builtins::CallableFor(isolate(), builtin_name);
- auto call_descriptor = Linkage::GetStubCallDescriptor(
- graph()->zone(), callable.descriptor(),
- argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->ReplaceInput(1, jsgraph()->HeapConstant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->ReplaceInput(3, receiver); // Update receiver input.
- node->ReplaceInput(6 + argc, effect); // Update effect input.
- NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
- return Changed(node);
+ // Check that all {receiver_maps} are actually JSReceiver maps and
+ // that the {function_template_info} accepts them without access
+ // checks (even if "access check needed" is set for {receiver}).
+ //
+ // Note that we don't need to know the concrete {receiver} maps here,
+ // meaning it's fine if the {receiver_maps} are unreliable, and we also
+ // don't need to install any stability dependencies, since the only
+ // relevant information regarding the {receiver} is the Map::constructor
+ // field on the root map (which is different from the JavaScript exposed
+ // "constructor" property) and that field cannot change.
+ //
+ // So if we know that {receiver} had a certain constructor at some point
+ // in the past (i.e. it had a certain map), then this constructor is going
+ // to be the same later, since this information cannot change with map
+ // transitions.
+ //
+ // The same is true for the instance type, e.g. we still know that the
+ // instance type is JSObject even if that information is unreliable, and
+ // the "access check needed" bit, which also cannot change later.
+ CHECK(first_receiver_map.IsJSReceiverMap());
+ CHECK(!first_receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ MapRef receiver_map(broker(), receiver_maps[i]);
+ HolderLookupResult holder_i =
+ function_template_info.LookupHolderOfExpectedType(receiver_map,
+ false);
+
+ if (api_holder.lookup != holder_i.lookup) return inference.NoChange();
+ if (!(api_holder.holder.has_value() && holder_i.holder.has_value()))
+ return inference.NoChange();
+ if (!api_holder.holder->equals(*holder_i.holder))
+ return inference.NoChange();
+
+ CHECK(receiver_map.IsJSReceiverMap());
+ CHECK(!receiver_map.is_access_check_needed() ||
+ function_template_info.accept_any_receiver());
+ }
+
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation &&
+ !inference.RelyOnMapsViaStability(dependencies())) {
+ // We were not able to make the receiver maps reliable without map
+ // checks but doing map checks would lead to deopt loops, so give up.
+ return inference.NoChange();
+ }
+
+ // TODO(neis): The maps were used in a way that does not actually require
+ // map checks or stability dependencies.
+ inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
+ control, p.feedback());
+
+ // Determine the appropriate holder for the {lookup}.
+ holder = api_holder.lookup == CallOptimization::kHolderFound
+ ? jsgraph()->Constant(*api_holder.holder)
+ : receiver;
+ } else {
+ // We don't have enough information to eliminate the access check
+ // and/or the compatible receiver check, so use the generic builtin
+ // that does those checks dynamically. This is still significantly
+ // faster than the generic call sequence.
+ Builtins::Name builtin_name;
+ if (function_template_info.accept_any_receiver()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckCompatibleReceiver;
+ } else if (function_template_info.is_signature_undefined()) {
+ builtin_name = Builtins::kCallFunctionTemplate_CheckAccess;
+ } else {
+ builtin_name =
+ Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver;
+ }
+
+ // The CallFunctionTemplate builtin requires the {receiver} to be
+ // an actual JSReceiver, so make sure we do the proper conversion
+ // first if necessary.
+ receiver = holder = effect =
+ graph()->NewNode(simplified()->ConvertReceiver(p.convert_mode()),
+ receiver, global_proxy, effect, control);
+
+ Callable callable = Builtins::CallableFor(isolate(), builtin_name);
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), callable.descriptor(),
+ argc + 1 /* implicit receiver */, CallDescriptor::kNeedsFrameState);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->ReplaceInput(3, receiver); // Update receiver input.
+ node->ReplaceInput(6 + argc, effect); // Update effect input.
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+ }
}
// TODO(turbofan): Consider introducing a JSCallApiCallback operator for
// this and lower it during JSGenericLowering, and unify this with the
// JSNativeContextSpecialization::InlineApiCall method a bit.
- Handle<CallHandlerInfo> call_handler_info(
- CallHandlerInfo::cast(function_template_info->call_code()), isolate());
- Handle<Object> data(call_handler_info->data(), isolate());
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return NoChange();
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
Callable call_api_callback = CodeFactory::CallApiCallback(isolate());
CallInterfaceDescriptor cid = call_api_callback.descriptor();
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), cid, argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState);
- ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ ApiFunction api_function(call_handler_info.callback());
ExternalReference function_reference = ExternalReference::Create(
&api_function, ExternalReference::DIRECT_API_CALL);
@@ -2895,7 +3029,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
jsgraph()->HeapConstant(call_api_callback.code()));
node->ReplaceInput(1, jsgraph()->ExternalConstant(function_reference));
node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(call_handler_info.data()));
node->InsertInput(graph()->zone(), 4, holder);
node->ReplaceInput(5, receiver); // Update receiver input.
node->ReplaceInput(7 + argc, continuation_frame_state);
@@ -3495,6 +3630,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceMathUnary(node, simplified()->NumberFloor());
case Builtins::kMathFround:
return ReduceMathUnary(node, simplified()->NumberFround());
+ case Builtins::kMathHypot:
+ return ReduceMathHypot(node);
case Builtins::kMathLog:
return ReduceMathUnary(node, simplified()->NumberLog());
case Builtins::kMathLog1p:
@@ -3563,8 +3700,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceStringPrototypeStringAt(simplified()->StringCharCodeAt(),
node);
case Builtins::kStringPrototypeCodePointAt:
- return ReduceStringPrototypeStringAt(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF32), node);
+ return ReduceStringPrototypeStringAt(simplified()->StringCodePointAt(),
+ node);
case Builtins::kStringPrototypeSubstring:
return ReduceStringPrototypeSubstring(node);
case Builtins::kStringPrototypeSlice:
@@ -3642,18 +3779,23 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
return ReduceDateNow(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kBigIntAsUintN:
+ return ReduceBigIntAsUintN(node);
default:
break;
}
- if (!TracingFlags::is_runtime_stats_enabled() &&
- shared.object()->IsApiFunction()) {
+ if (shared.object()->IsApiFunction()) {
return ReduceCallApiFunction(node, shared);
}
return NoChange();
}
Reduction JSCallReducer::ReduceJSCallWithArrayLike(Node* node) {
+ // TODO(mslekova): Remove once ReduceJSCallWithArrayLike is brokerized.
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
DCHECK_EQ(IrOpcode::kJSCallWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
VectorSlotPair feedback;
@@ -4250,6 +4392,52 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+Node* JSCallReducer::LoadReceiverElementsKind(Node* receiver, Node** effect,
+ Node** control) {
+ Node* receiver_map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ receiver, *effect, *control);
+ Node* receiver_bit_field2 = *effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapBitField2()), receiver_map,
+ *effect, *control);
+ Node* receiver_elements_kind = graph()->NewNode(
+ simplified()->NumberShiftRightLogical(),
+ graph()->NewNode(simplified()->NumberBitwiseAnd(), receiver_bit_field2,
+ jsgraph()->Constant(Map::ElementsKindBits::kMask)),
+ jsgraph()->Constant(Map::ElementsKindBits::kShift));
+ return receiver_elements_kind;
+}
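[Editor's note] For orientation, the graph built by the new LoadReceiverElementsKind helper is the usual bit-field decode of Map::bit_field2, expressed with NumberBitwiseAnd and NumberShiftRightLogical nodes. A minimal standalone sketch of the same computation follows; the mask and shift values are placeholders, not V8's actual ElementsKindBits constants.

```cpp
#include <cstdint>

// Hypothetical stand-ins for Map::ElementsKindBits::kMask / kShift.
constexpr uint32_t kElementsKindMask = 0xF8;
constexpr uint32_t kElementsKindShift = 3;

// Same operation the reducer encodes as graph nodes above:
// mask out the elements-kind bits, then shift them down to a small integer.
uint32_t ExtractElementsKind(uint32_t bit_field2) {
  return (bit_field2 & kElementsKindMask) >> kElementsKindShift;
}
```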
+
+void JSCallReducer::CheckIfElementsKind(Node* receiver_elements_kind,
+ ElementsKind kind, Node* control,
+ Node** if_true, Node** if_false) {
+ Node* is_packed_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetPackedElementsKind(kind)));
+ Node* packed_branch =
+ graph()->NewNode(common()->Branch(), is_packed_kind, control);
+ Node* if_packed = graph()->NewNode(common()->IfTrue(), packed_branch);
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* if_not_packed = graph()->NewNode(common()->IfFalse(), packed_branch);
+ Node* is_holey_kind =
+ graph()->NewNode(simplified()->NumberEqual(), receiver_elements_kind,
+ jsgraph()->Constant(GetHoleyElementsKind(kind)));
+ Node* holey_branch =
+ graph()->NewNode(common()->Branch(), is_holey_kind, if_not_packed);
+ Node* if_holey = graph()->NewNode(common()->IfTrue(), holey_branch);
+
+ Node* if_not_packed_not_holey =
+ graph()->NewNode(common()->IfFalse(), holey_branch);
+
+ *if_true = graph()->NewNode(common()->Merge(2), if_packed, if_holey);
+ *if_false = if_not_packed_not_holey;
+ } else {
+ *if_true = if_packed;
+ *if_false = graph()->NewNode(common()->IfFalse(), packed_branch);
+ }
+}
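[Editor's note] Read as ordinary control flow rather than Branch/Merge nodes, CheckIfElementsKind accepts a receiver whose elements kind is the packed variant of {kind}, or the holey variant when {kind} itself is holey. A rough sketch of that predicate, with simplified names that are not V8's API:

```cpp
// Sketch of the branch structure above as a plain predicate: *if_true in the
// graph corresponds to returning true here, *if_false to returning false.
bool MatchesElementsKind(int receiver_kind, int packed_kind, int holey_kind,
                         bool kind_is_holey) {
  if (receiver_kind == packed_kind) return true;                  // if_packed
  if (kind_is_holey && receiver_kind == holey_kind) return true;  // if_holey
  return false;
}
```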
+
// ES6 section 22.1.3.18 Array.prototype.push ( )
Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -4267,81 +4455,121 @@ Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind, true)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds, true)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- for (auto& value : values) {
- if (IsSmiElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
- value, effect, control);
- } else if (IsDoubleElementsKind(kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
- value, effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* return_value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
}
- }
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
- Node* value = length;
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(
+ simplified()->CheckNumber(p.feedback()), value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
- effect, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
+ return_value = length;
- GrowFastElementsMode mode =
- IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
- elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, new_length, effect, control);
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = return_value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind)
+ ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, index, value, effect, control);
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect =
+ graph()->NewNode(simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
}
+
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(return_value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
}
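[Editor's note] The dispatch shape introduced here recurs in the pop and shift reductions below: iterate over the candidate elements kinds, guard every kind except the last with CheckIfElementsKind, and merge the per-kind control, effect and value chains at the end. At runtime only one specialized body executes, which the simplified sketch below captures by returning from the matching case; DispatchOnElementsKind and `body` are illustrative names, not part of the reducer.

```cpp
#include <functional>
#include <vector>

// Sketch of the per-elements-kind dispatch used by the push/pop/shift
// reductions: all kinds but the last are guarded by an explicit check; the
// last kind is taken unconditionally, mirroring the skipped branch above.
int DispatchOnElementsKind(const std::vector<int>& kinds, int receiver_kind,
                           const std::function<int(int)>& body) {
  for (size_t i = 0; i < kinds.size(); ++i) {
    bool is_last = (i + 1 == kinds.size());
    if (is_last || receiver_kind == kinds[i]) {
      return body(kinds[i]);  // specialized code for this elements kind
    }
  }
  return 0;  // not reached when kinds is non-empty
}
```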
// ES6 section 22.1.3.17 Array.prototype.pop ( )
@@ -4360,79 +4588,117 @@ Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
- elements, efalse, if_false);
- }
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, efalse, if_false);
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, length, efalse, if_false);
+ // Convert the hole to undefined. Do this last, so that we can optimize
+ // conversion operator via some smart strength reduction in many cases.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
}
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(kind)) {
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
}
ReplaceWithValue(node, value, effect, control);
@@ -4458,151 +4724,172 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- ElementsKind kind;
- if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, &kind)) {
+ std::vector<ElementsKind> kinds;
+ if (!CanInlineArrayResizingBuiltin(broker(), receiver_maps, kinds)) {
return inference.NoChange();
}
if (!dependencies()->DependOnNoElementsProtector()) UNREACHABLE();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
- effect, control);
+ std::vector<Node*> controls_to_merge;
+ std::vector<Node*> effects_to_merge;
+ std::vector<Node*> values_to_merge;
+ Node* value = jsgraph()->UndefinedConstant();
+
+ Node* receiver_elements_kind =
+ LoadReceiverElementsKind(receiver, &effect, &control);
+ Node* next_control = control;
+ Node* next_effect = effect;
+ for (size_t i = 0; i < kinds.size(); i++) {
+ ElementsKind kind = kinds[i];
+ control = next_control;
+ effect = next_effect;
+ // We do not need a branch for the last elements kind.
+ if (i != kinds.size() - 1) {
+ CheckIfElementsKind(receiver_elements_kind, kind, control, &control,
+ &next_control);
+ }
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, effect, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
{
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(kind)) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(
+ AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements, index,
- effect, control);
- effect =
- graph()->NewNode(simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(),
- index, jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
+ ElementAccess const access =
+ AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements,
+ index, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(), index,
+ jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
- receiver, length, etrue1, if_true1);
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
+ GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code =
- jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry =
- jsgraph()->ExternalConstant(ExternalReference::Create(builtin_entry));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(call_descriptor), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc, context,
- frame_state, efalse1, if_false1);
- }
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ auto call_descriptor = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
+ kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference::Create(builtin_entry));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(call_descriptor), stub_code,
+ receiver, jsgraph()->PaddingConstant(), argc,
+ target, jsgraph()->UndefinedConstant(), entry,
+ argc, context, frame_state, efalse1, if_false1);
+ }
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
// Convert the hole to undefined. Do this last, so that we can optimize
// conversion operator via some smart strength reduction in many cases.
@@ -4611,8 +4898,27 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
}
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
+ controls_to_merge.push_back(control);
+ effects_to_merge.push_back(effect);
+ values_to_merge.push_back(value);
+ }
+
+ if (controls_to_merge.size() > 1) {
+ int const count = static_cast<int>(controls_to_merge.size());
+
+ control = graph()->NewNode(common()->Merge(count), count,
+ &controls_to_merge.front());
+ effects_to_merge.push_back(control);
+ effect = graph()->NewNode(common()->EffectPhi(count), count + 1,
+ &effects_to_merge.front());
+ values_to_merge.push_back(control);
+ value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, count),
+ count + 1, &values_to_merge.front());
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
// ES6 section 22.1.3.23 Array.prototype.slice ( )
@@ -5230,8 +5536,8 @@ Reduction JSCallReducer::ReduceStringFromCodePoint(Node* node) {
graph()->NewNode(simplified()->CheckBounds(p.feedback()), input,
jsgraph()->Constant(0x10FFFF + 1), effect, control);
- Node* value = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF32), input);
+ Node* value =
+ graph()->NewNode(simplified()->StringFromSingleCodePoint(), input);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -5287,12 +5593,8 @@ Reduction JSCallReducer::ReduceStringIteratorPrototypeNext(Node* node) {
Node* vtrue0;
{
done_true = jsgraph()->FalseConstant();
- Node* codepoint = etrue0 = graph()->NewNode(
- simplified()->StringCodePointAt(UnicodeEncoding::UTF16), string, index,
- etrue0, if_true0);
- vtrue0 = graph()->NewNode(
- simplified()->StringFromSingleCodePoint(UnicodeEncoding::UTF16),
- codepoint);
+ vtrue0 = etrue0 = graph()->NewNode(simplified()->StringFromCodePointAt(),
+ string, index, etrue0, if_true0);
// Update iterator.[[NextIndex]]
Node* char_length = graph()->NewNode(simplified()->StringLength(), vtrue0);
@@ -5396,6 +5698,8 @@ Node* JSCallReducer::CreateArtificialFrameState(
}
Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5404,7 +5708,6 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
Node* target = NodeProperties::GetValueInput(node, 0);
Node* executor = NodeProperties::GetValueInput(node, 1);
Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
-
Node* context = NodeProperties::GetContextInput(node);
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -5459,7 +5762,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
// Allocate a promise context for the closures below.
Node* promise_context = effect = graph()->NewNode(
javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
+ native_context().scope_info().object(),
PromiseBuiltins::kPromiseContextLength - Context::MIN_CONTEXT_SLOTS,
FUNCTION_SCOPE),
context, effect, control);
@@ -5477,21 +5780,13 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
promise_context, jsgraph()->TrueConstant(), effect, control);
// Allocate the closure for the resolve case.
- SharedFunctionInfoRef resolve_shared =
- native_context().promise_capability_default_resolve_shared_fun();
- Node* resolve = effect = graph()->NewNode(
- javascript()->CreateClosure(
- resolve_shared.object(), factory()->many_closures_cell(),
- handle(resolve_shared.object()->GetCode(), isolate())),
+ Node* resolve = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_resolve_shared_fun(),
promise_context, effect, control);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef reject_shared =
- native_context().promise_capability_default_reject_shared_fun();
- Node* reject = effect = graph()->NewNode(
- javascript()->CreateClosure(
- reject_shared.object(), factory()->many_closures_cell(),
- handle(reject_shared.object()->GetCode(), isolate())),
+ Node* reject = effect = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_capability_default_reject_shared_fun(),
promise_context, effect, control);
const std::vector<Node*> checkpoint_parameters_continuation(
@@ -5624,6 +5919,30 @@ Reduction JSCallReducer::ReducePromiseInternalResolve(Node* node) {
return Replace(value);
}
+bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
+ if (!inference->HaveMaps()) return false;
+ MapHandles const& receiver_maps = inference->GetMaps();
+
+ // Check whether all {receiver_maps} are JSPromise maps and
+ // have the initial Promise.prototype as their [[Prototype]].
+ for (Handle<Map> map : receiver_maps) {
+ MapRef receiver_map(broker(), map);
+ if (!receiver_map.IsJSPromiseMap()) return false;
+ if (!FLAG_concurrent_inlining) {
+ receiver_map.SerializePrototype();
+ } else if (!receiver_map.serialized_prototype()) {
+ TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
+ return false;
+ }
+ if (!receiver_map.prototype().equals(
+ native_context().promise_prototype())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
// ES section #sec-promise.prototype.catch
Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -5637,20 +5956,7 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
return inference.NoChange();
@@ -5675,8 +5981,21 @@ Reduction JSCallReducer::ReducePromisePrototypeCatch(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+Node* JSCallReducer::CreateClosureFromBuiltinSharedFunctionInfo(
+ SharedFunctionInfoRef shared, Node* context, Node* effect, Node* control) {
+ DCHECK(shared.HasBuiltinId());
+ Callable const callable = Builtins::CallableFor(
+ isolate(), static_cast<Builtins::Name>(shared.builtin_id()));
+ return graph()->NewNode(
+ javascript()->CreateClosure(
+ shared.object(), factory()->many_closures_cell(), callable.code()),
+ context, effect, control);
+}
+
// ES section #sec-promise.prototype.finally
Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
int arity = static_cast<int>(p.arity() - 2);
@@ -5690,21 +6009,9 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
-
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
if (!dependencies()->DependOnPromiseThenProtector())
@@ -5730,13 +6037,13 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
jsgraph()->Constant(native_context().promise_function());
// Allocate shared context for the closures below.
- context = etrue = graph()->NewNode(
- javascript()->CreateFunctionContext(
- handle(native_context().object()->scope_info(), isolate()),
- PromiseBuiltins::kPromiseFinallyContextLength -
- Context::MIN_CONTEXT_SLOTS,
- FUNCTION_SCOPE),
- context, etrue, if_true);
+ context = etrue =
+ graph()->NewNode(javascript()->CreateFunctionContext(
+ native_context().scope_info().object(),
+ PromiseBuiltins::kPromiseFinallyContextLength -
+ Context::MIN_CONTEXT_SLOTS,
+ FUNCTION_SCOPE),
+ context, etrue, if_true);
etrue = graph()->NewNode(
simplified()->StoreField(
AccessBuilder::ForContextSlot(PromiseBuiltins::kOnFinallySlot)),
@@ -5747,22 +6054,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef catch_finally =
- native_context().promise_catch_finally_shared_fun();
- catch_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- catch_finally.object(), factory()->many_closures_cell(),
- handle(catch_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_catch_finally_shared_fun(), context, etrue,
+ if_true);
// Allocate the closure for the fulfill case.
- SharedFunctionInfoRef then_finally =
- native_context().promise_then_finally_shared_fun();
- then_true = etrue = graph()->NewNode(
- javascript()->CreateClosure(
- then_finally.object(), factory()->many_closures_cell(),
- handle(then_finally.object()->GetCode(), isolate())),
- context, etrue, if_true);
+ then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
+ native_context().promise_then_finally_shared_fun(), context, etrue,
+ if_true);
}
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -5810,6 +6109,8 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
}
Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
@@ -5829,20 +6130,7 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
Node* frame_state = NodeProperties::GetFrameStateInput(node);
MapInference inference(broker(), receiver, effect);
- if (!inference.HaveMaps()) return NoChange();
- MapHandles const& receiver_maps = inference.GetMaps();
-
- // Check whether all {receiver_maps} are JSPromise maps and
- // have the initial Promise.prototype as their [[Prototype]].
- for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.IsJSPromiseMap()) return inference.NoChange();
- receiver_map.SerializePrototype();
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
- return inference.NoChange();
- }
- }
+ if (!DoPromiseChecks(&inference)) return inference.NoChange();
if (!dependencies()->DependOnPromiseHookProtector())
return inference.NoChange();
@@ -5889,6 +6177,8 @@ Reduction JSCallReducer::ReducePromisePrototypeThen(Node* node) {
// ES section #sec-promise.resolve
Reduction JSCallReducer::ReducePromiseResolveTrampoline(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
+
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* value = node->op()->ValueInputCount() > 2
@@ -6828,8 +7118,11 @@ Reduction JSCallReducer::ReduceNumberParseInt(Node* node) {
}
Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
+ DisallowHeapAccessIf disallow_heap_access(FLAG_concurrent_inlining);
+
if (FLAG_force_slow_path) return NoChange();
if (node->op()->ValueInputCount() < 3) return NoChange();
+
CallParameters const& p = CallParametersOf(node->op());
if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
@@ -6846,13 +7139,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
}
MapHandles const& regexp_maps = inference.GetMaps();
- // Compute property access info for "exec" on {resolution}.
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- MapHandles(regexp_maps.begin(), regexp_maps.end()),
- factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ if (!FLAG_concurrent_inlining) {
+ // Compute property access info for "exec" on {resolution}.
+ access_info_factory.ComputePropertyAccessInfos(
+ MapHandles(regexp_maps.begin(), regexp_maps.end()),
+ factory()->exec_string(), AccessMode::kLoad, &access_infos);
+ } else {
+ // Obtain precomputed access infos from the broker.
+ for (auto map : regexp_maps) {
+ MapRef map_ref(broker(), map);
+ PropertyAccessInfo access_info =
+ broker()->GetAccessInfoForLoadingExec(map_ref);
+ access_infos.push_back(access_info);
+ }
+ }
+
PropertyAccessInfo ai_exec =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
@@ -6864,34 +7168,24 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Do not reduce if the exec method is not on the prototype chain.
if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
+ JSObjectRef holder_ref(broker(), holder);
+
// Bail out if the exec method is not the original one.
- Handle<Object> constant = JSObject::FastPropertyAt(
- holder, ai_exec.field_representation(), ai_exec.field_index());
- if (!constant.is_identical_to(isolate()->regexp_exec_function())) {
+ base::Optional<ObjectRef> constant = holder_ref.GetOwnProperty(
+ ai_exec.field_representation(), ai_exec.field_index());
+ if (!constant.has_value() ||
+ !constant->equals(native_context().regexp_exec_function())) {
return inference.NoChange();
}
- // Protect the exec method change in the holder.
- Handle<Object> exec_on_proto;
- MapRef holder_map(broker(), handle(holder->map(), isolate()));
- Handle<DescriptorArray> descriptors(
- holder_map.object()->instance_descriptors(), isolate());
- int descriptor_index =
- descriptors->Search(*(factory()->exec_string()), *holder_map.object());
- CHECK_NE(descriptor_index, DescriptorArray::kNotFound);
- holder_map.SerializeOwnDescriptors();
- dependencies()->DependOnFieldType(holder_map, descriptor_index);
- } else {
- return inference.NoChange();
- }
-
- // Add proper dependencies on the {regexp}s [[Prototype]]s.
- Handle<JSObject> holder;
- if (ai_exec.holder().ToHandle(&holder)) {
+ // Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
ai_exec.receiver_maps(), kStartAtPrototype,
JSObjectRef(broker(), holder));
+ } else {
+ return inference.NoChange();
}
+
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -6955,12 +7249,47 @@ Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
return Changed(node);
}
+Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
+ if (!jsgraph()->machine()->Is64()) {
+ return NoChange();
+ }
+
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+ if (node->op()->ValueInputCount() < 3) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* bits = NodeProperties::GetValueInput(node, 2);
+ Node* value = NodeProperties::GetValueInput(node, 3);
+
+ NumberMatcher matcher(bits);
+ if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
+ const int bits_value = static_cast<int>(matcher.Value());
+ value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
+ value, effect, control);
+ value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+
+ return NoChange();
+}
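[Editor's note] For context, BigInt.asUintN(bits, value) is specified as value modulo 2^bits, and the reduction above only fires when {bits} is a constant integer in [0, 64] on a 64-bit target. A minimal sketch of that truncation for values that fit in a machine word (a standalone illustration, not V8's BigInt implementation):

```cpp
#include <cstdint>

// value mod 2^bits for 0 <= bits <= 64, i.e. keep only the low `bits` bits.
// This is the operation the BigIntAsUintN(bits_value) operator stands for at
// the constant bit counts the reduction above accepts.
uint64_t AsUintN64(int bits, uint64_t value) {
  if (bits >= 64) return value;                // full width: nothing to mask
  return value & ((uint64_t{1} << bits) - 1);  // mask off the high bits
}
```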
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+NativeContextRef JSCallReducer::native_context() const {
+ return broker()->native_context();
+}
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 02821ebb0d..bf3676c5b2 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -29,6 +29,7 @@ struct FieldAccess;
class JSGraph;
class JSHeapBroker;
class JSOperatorBuilder;
+class MapInference;
class NodeProperties;
class SimplifiedOperatorBuilder;
@@ -155,6 +156,7 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathMinMax(Node* node, const Operator* op, Node* empty_value);
+ Reduction ReduceMathHypot(Node* node);
Reduction ReduceNumberIsFinite(Node* node);
Reduction ReduceNumberIsInteger(Node* node);
@@ -190,6 +192,15 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceNumberConstructor(Node* node);
+ Reduction ReduceBigIntAsUintN(Node* node);
+
+ // Helper to verify promise receiver maps are as expected.
+ // On bailout from a reduction, be sure to return inference.NoChange().
+ bool DoPromiseChecks(MapInference* inference);
+
+ Node* CreateClosureFromBuiltinSharedFunctionInfo(SharedFunctionInfoRef shared,
+ Node* context, Node* effect,
+ Node* control);
// Returns the updated {to} node, and updates control and effect along the
// way.
@@ -231,12 +242,16 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
const SharedFunctionInfoRef& shared,
Node* context = nullptr);
+ void CheckIfElementsKind(Node* receiver_elements_kind, ElementsKind kind,
+ Node* control, Node** if_true, Node** if_false);
+ Node* LoadReceiverElementsKind(Node* receiver, Node** effect, Node** control);
+
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
Factory* factory() const;
- NativeContextRef native_context() const { return broker()->native_context(); }
+ NativeContextRef native_context() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index dea6d7fc2b..035e8b7ceb 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -6,6 +6,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -144,9 +145,10 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
if (!access.immutable()) {
@@ -157,8 +159,6 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
// This will hold the final value, if we can figure it out.
base::Optional<ObjectRef> maybe_value;
-
- concrete.SerializeSlot(static_cast<int>(access.index()));
maybe_value = concrete.get(static_cast<int>(access.index()));
if (maybe_value.has_value() && !maybe_value->IsSmi()) {
// Even though the context slot is immutable, the context might have escaped
@@ -174,6 +174,9 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
}
if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "slot value " << access.index()
+ << " for context "
+ << concrete);
return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
@@ -207,9 +210,10 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// Now walk up the concrete context chain for the remaining depth.
ContextRef concrete = maybe_concrete.value();
- concrete.SerializeContextChain(); // TODO(neis): Remove later.
- for (; depth > 0; --depth) {
- concrete = concrete.previous();
+ concrete = concrete.previous(&depth);
+ if (depth > 0) {
+ TRACE_BROKER_MISSING(broker(), "previous value for context " << concrete);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
}
return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
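Both ReduceJSLoadContext and ReduceJSStoreContext now rely on the same contract: ContextRef::previous(&depth) follows as many previous links as it can, decrementing {depth} for each link taken, and a nonzero {depth} on return means the chain was not fully available, so the reducer keeps the partially specialized context instead of a constant slot value. A self-contained analogue of that walk (illustrative only; Ctx and WalkUp are stand-ins, not patch code):

  #include <cstddef>

  struct Ctx {
    Ctx* previous = nullptr;  // nullptr models an unserialized parent context.
  };

  // Follow up to *depth parent links, decrementing *depth per link taken.
  // A nonzero *depth afterwards signals that the walk stopped early.
  Ctx* WalkUp(Ctx* ctx, size_t* depth) {
    while (*depth > 0 && ctx->previous != nullptr) {
      ctx = ctx->previous;
      --*depth;
    }
    return ctx;
  }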
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 8fc8dd1308..4e69db6b9b 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -837,7 +837,7 @@ Reduction JSCreateLowering::ReduceJSCreateCollectionIterator(Node* node) {
simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
iterated_object, effect, control);
- // Create the JSArrayIterator result.
+ // Create the JSCollectionIterator result.
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(JSCollectionIterator::kSize, AllocationType::kYoung,
Type::OtherObject());
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index a3805ec125..43a4beadee 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) {
DEFINE_GETTER(AllocateInYoungGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration)))
+DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInYoungGeneration)))
+
DEFINE_GETTER(AllocateInOldGenerationStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration)))
+DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant,
+ HeapConstant(BUILTIN_CODE(isolate(),
+ AllocateRegularInOldGeneration)))
+
DEFINE_GETTER(ArrayConstructorStubConstant,
HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index b5c80515ad..ec36c26034 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
void GetCachedNodes(NodeVector* nodes);
// Cached global nodes.
-#define CACHED_GLOBAL_LIST(V) \
- V(AllocateInYoungGenerationStubConstant) \
- V(AllocateInOldGenerationStubConstant) \
- V(ArrayConstructorStubConstant) \
- V(BigIntMapConstant) \
- V(BooleanMapConstant) \
- V(ToNumberBuiltinConstant) \
- V(EmptyFixedArrayConstant) \
- V(EmptyStringConstant) \
- V(FixedArrayMapConstant) \
- V(PropertyArrayMapConstant) \
- V(FixedDoubleArrayMapConstant) \
- V(HeapNumberMapConstant) \
- V(OptimizedOutConstant) \
- V(StaleRegisterConstant) \
- V(UndefinedConstant) \
- V(TheHoleConstant) \
- V(TrueConstant) \
- V(FalseConstant) \
- V(NullConstant) \
- V(ZeroConstant) \
- V(OneConstant) \
- V(NaNConstant) \
- V(MinusOneConstant) \
- V(EmptyStateValues) \
+#define CACHED_GLOBAL_LIST(V) \
+ V(AllocateInYoungGenerationStubConstant) \
+ V(AllocateRegularInYoungGenerationStubConstant) \
+ V(AllocateInOldGenerationStubConstant) \
+ V(AllocateRegularInOldGenerationStubConstant) \
+ V(ArrayConstructorStubConstant) \
+ V(BigIntMapConstant) \
+ V(BooleanMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(EmptyFixedArrayConstant) \
+ V(EmptyStringConstant) \
+ V(FixedArrayMapConstant) \
+ V(PropertyArrayMapConstant) \
+ V(FixedDoubleArrayMapConstant) \
+ V(HeapNumberMapConstant) \
+ V(OptimizedOutConstant) \
+ V(StaleRegisterConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(NullConstant) \
+ V(ZeroConstant) \
+ V(OneConstant) \
+ V(NaNConstant) \
+ V(MinusOneConstant) \
+ V(EmptyStateValues) \
V(SingleDeadTypedStateValues)
// Cached global node accessor methods.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 86250e9d1f..c79c793ae6 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
@@ -12,6 +13,7 @@
#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/per-isolate-compiler-cache.h"
#include "src/compiler/vector-slot-pair.h"
@@ -26,6 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
#include "src/utils/boxed-float.h"
#include "src/utils/utils.h"
@@ -121,17 +124,31 @@ class PropertyCellData : public HeapObjectData {
ObjectData* value_ = nullptr;
};
+// TODO(mslekova): Once we have real-world usage data, we might want to
+// reimplement this as a sorted vector instead, to reduce the memory overhead.
+typedef ZoneMap<MapData*, HolderLookupResult> KnownReceiversMap;
+
class FunctionTemplateInfoData : public HeapObjectData {
public:
FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object);
- void Serialize(JSHeapBroker* broker);
- ObjectData* call_code() const { return call_code_; }
+ bool is_signature_undefined() const { return is_signature_undefined_; }
+ bool accept_any_receiver() const { return accept_any_receiver_; }
+ bool has_call_code() const { return has_call_code_; }
+
+ void SerializeCallCode(JSHeapBroker* broker);
+ CallHandlerInfoData* call_code() const { return call_code_; }
+ KnownReceiversMap& known_receivers() { return known_receivers_; }
private:
- bool serialized_ = false;
- ObjectData* call_code_ = nullptr;
+ bool serialized_call_code_ = false;
+ CallHandlerInfoData* call_code_ = nullptr;
+ bool is_signature_undefined_ = false;
+ bool accept_any_receiver_ = false;
+ bool has_call_code_ = false;
+
+ KnownReceiversMap known_receivers_;
};
class CallHandlerInfoData : public HeapObjectData {
@@ -154,7 +171,16 @@ class CallHandlerInfoData : public HeapObjectData {
FunctionTemplateInfoData::FunctionTemplateInfoData(
JSHeapBroker* broker, ObjectData** storage,
Handle<FunctionTemplateInfo> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object),
+ known_receivers_(broker->zone()) {
+ auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
+ is_signature_undefined_ =
+ function_template_info->signature().IsUndefined(broker->isolate());
+ accept_any_receiver_ = function_template_info->accept_any_receiver();
+
+ CallOptimization call_optimization(broker->isolate(), object);
+ has_call_code_ = call_optimization.is_simple_api_call();
+}
CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
ObjectData** storage,
@@ -181,18 +207,17 @@ void PropertyCellData::Serialize(JSHeapBroker* broker) {
value_ = broker->GetOrCreateData(cell->value());
}
-void FunctionTemplateInfoData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
+void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
+ if (serialized_call_code_) return;
+ serialized_call_code_ = true;
- TraceScope tracer(broker, this, "FunctionTemplateInfoData::Serialize");
+ TraceScope tracer(broker, this,
+ "FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
DCHECK_NULL(call_code_);
- call_code_ = broker->GetOrCreateData(function_template_info->call_code());
-
- if (call_code_->IsCallHandlerInfo()) {
- call_code_->AsCallHandlerInfo()->Serialize(broker);
- }
+ call_code_ = broker->GetOrCreateData(function_template_info->call_code())
+ ->AsCallHandlerInfo();
+ call_code_->Serialize(broker);
}
void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
@@ -231,6 +256,12 @@ class JSObjectField {
uint64_t number_bits_ = 0;
};
+struct FieldIndexHasher {
+ size_t operator()(FieldIndex field_index) const {
+ return field_index.index();
+ }
+};
+
class JSObjectData : public HeapObjectData {
public:
JSObjectData(JSHeapBroker* broker, ObjectData** storage,
@@ -253,12 +284,15 @@ class JSObjectData : public HeapObjectData {
ObjectData* GetOwnConstantElement(JSHeapBroker* broker, uint32_t index,
bool serialize);
+ ObjectData* GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index, bool serialize);
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
private:
- void SerializeRecursive(JSHeapBroker* broker, int max_depths);
+ void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);
FixedArrayBaseData* elements_ = nullptr;
bool cow_or_empty_elements_tenured_ = false;
@@ -277,6 +311,12 @@ class JSObjectData : public HeapObjectData {
// non-configurable, or (2) are known not to (possibly they don't exist at
// all). In case (2), the second pair component is nullptr.
ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
+ // Properties that either:
+ // (1) are known to exist directly on the object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ // For simplicity, this may in theory overlap with inobject_fields_.
+ ZoneUnorderedMap<FieldIndex, ObjectData*, FieldIndexHasher> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
@@ -312,6 +352,15 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
}
return base::nullopt;
}
+
+ObjectRef GetOwnPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ Representation representation,
+ FieldIndex field_index) {
+ Handle<Object> constant =
+ JSObject::FastPropertyAt(receiver, representation, field_index);
+ return ObjectRef(broker, constant);
+}
} // namespace
ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
@@ -333,6 +382,27 @@ ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
return result;
}
+ObjectData* JSObjectData::GetOwnProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index,
+ bool serialize) {
+ auto p = own_properties_.find(field_index);
+ if (p != own_properties_.end()) return p->second;
+
+ if (!serialize) {
+ TRACE_MISSING(broker, "knowledge about property with index "
+ << field_index.property_index() << " on "
+ << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), representation, field_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(field_index, result));
+ return result;
+}
+
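GetOwnProperty follows the broker's serialize-on-demand shape: consult the per-object cache first, give up (with a trace) when serialization is not allowed, and otherwise read the value from the heap once and remember it for later background-thread use. A simplified, self-contained sketch of that pattern (illustrative only; PropertyCache and ReadFromHeap are hypothetical names):

  #include <cstdint>
  #include <unordered_map>

  struct Value { int payload = 0; };

  class PropertyCache {
   public:
    // Returns nullptr when the value is not cached and serializing is not
    // allowed right now; the caller must then treat the property as unknown.
    const Value* Get(uint32_t key, bool serialize) {
      auto it = cache_.find(key);
      if (it != cache_.end()) return &it->second;
      if (!serialize) return nullptr;
      return &cache_.emplace(key, ReadFromHeap(key)).first->second;
    }

   private:
    static Value ReadFromHeap(uint32_t key) {   // Stand-in for the slow path.
      return Value{static_cast<int>(key)};
    }
    std::unordered_map<uint32_t, Value> cache_;
  };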
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -503,24 +573,18 @@ class ContextData : public HeapObjectData {
public:
ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object);
- void SerializeContextChain(JSHeapBroker* broker);
- ContextData* previous() const {
- CHECK(serialized_context_chain_);
- return previous_;
- }
+  // {previous} will return the closest valid context possible to the desired
+ // {depth}, decrementing {depth} for each previous link successfully followed.
+ // If {serialize} is true, it will serialize contexts along the way.
+ ContextData* previous(JSHeapBroker* broker, size_t* depth, bool serialize);
- void SerializeSlot(JSHeapBroker* broker, int index);
-
- ObjectData* GetSlot(int index) {
- auto search = slots_.find(index);
- CHECK(search != slots_.end());
- return search->second;
- }
+ // Returns nullptr if the slot index isn't valid or wasn't serialized
+ // (unless {serialize} is true).
+ ObjectData* GetSlot(JSHeapBroker* broker, int index, bool serialize);
private:
ZoneMap<int, ObjectData*> slots_;
- bool serialized_context_chain_ = false;
ContextData* previous_ = nullptr;
};
@@ -528,28 +592,46 @@ ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object)
: HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
-void ContextData::SerializeContextChain(JSHeapBroker* broker) {
- if (serialized_context_chain_) return;
- serialized_context_chain_ = true;
+ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
+ bool serialize) {
+ if (*depth == 0) return this;
- TraceScope tracer(broker, this, "ContextData::SerializeContextChain");
- Handle<Context> context = Handle<Context>::cast(object());
+ if (serialize && previous_ == nullptr) {
+ TraceScope tracer(broker, this, "ContextData::previous");
+ Handle<Context> context = Handle<Context>::cast(object());
+ Object prev = context->unchecked_previous();
+ if (prev.IsContext()) {
+ previous_ = broker->GetOrCreateData(prev)->AsContext();
+ }
+ }
- DCHECK_NULL(previous_);
- // Context::previous DCHECK-fails when called on the native context.
- if (!context->IsNativeContext()) {
- previous_ = broker->GetOrCreateData(context->previous())->AsContext();
- previous_->SerializeContextChain(broker);
+ if (previous_ != nullptr) {
+ *depth = *depth - 1;
+ return previous_->previous(broker, depth, serialize);
}
+ return this;
}
-void ContextData::SerializeSlot(JSHeapBroker* broker, int index) {
- TraceScope tracer(broker, this, "ContextData::SerializeSlot");
- TRACE(broker, "Serializing script context slot " << index);
- Handle<Context> context = Handle<Context>::cast(object());
- CHECK(index >= 0 && index < context->length());
- ObjectData* odata = broker->GetOrCreateData(context->get(index));
- slots_.insert(std::make_pair(index, odata));
+ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
+ bool serialize) {
+ CHECK_GE(index, 0);
+ auto search = slots_.find(index);
+ if (search != slots_.end()) {
+ return search->second;
+ }
+
+ if (serialize) {
+ Handle<Context> context = Handle<Context>::cast(object());
+ if (index < context->length()) {
+ TraceScope tracer(broker, this, "ContextData::GetSlot");
+ TRACE(broker, "Serializing context slot " << index);
+ ObjectData* odata = broker->GetOrCreateData(context->get(index));
+ slots_.insert(std::make_pair(index, odata));
+ return odata;
+ }
+ }
+
+ return nullptr;
}
class NativeContextData : public ContextData {
@@ -564,6 +646,11 @@ class NativeContextData : public ContextData {
return function_maps_;
}
+ ScopeInfoData* scope_info() const {
+ CHECK(serialized_);
+ return scope_info_;
+ }
+
NativeContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<NativeContext> object);
void Serialize(JSHeapBroker* broker);
@@ -574,6 +661,7 @@ class NativeContextData : public ContextData {
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
ZoneVector<MapData*> function_maps_;
+ ScopeInfoData* scope_info_ = nullptr;
};
class NameData : public HeapObjectData {
@@ -674,14 +762,15 @@ bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
DCHECK_GE(max_depth, 0);
DCHECK_GE(*max_properties, 0);
+ Isolate* const isolate = boilerplate->GetIsolate();
+
// Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+ if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;
// Check for too deep nesting.
if (max_depth == 0) return false;
// Check the elements.
- Isolate* const isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
if (elements->length() > 0 &&
elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
@@ -780,6 +869,18 @@ class AllocationSiteData : public HeapObjectData {
bool serialized_boilerplate_ = false;
};
+class BigIntData : public HeapObjectData {
+ public:
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
+ : HeapObjectData(broker, storage, object),
+ as_uint64_(object->AsUint64(nullptr)) {}
+
+ uint64_t AsUint64() const { return as_uint64_; }
+
+ private:
+ const uint64_t as_uint64_;
+};
+
// Only used in JSNativeContextSpecialization.
class ScriptContextTableData : public HeapObjectData {
public:
@@ -1215,7 +1316,8 @@ JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object)
: HeapObjectData(broker, storage, object),
inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()) {}
+ own_constant_elements_(broker->zone()),
+ own_properties_(broker->zone()) {}
FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<FixedArray> object)
@@ -1282,18 +1384,106 @@ class BytecodeArrayData : public FixedArrayBaseData {
return incoming_new_target_or_generator_register_;
}
+ uint8_t get(int index) const {
+ DCHECK(is_serialized_for_compilation_);
+ return bytecodes_[index];
+ }
+
+ Address GetFirstBytecodeAddress() const {
+ return reinterpret_cast<Address>(bytecodes_.data());
+ }
+
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
+ return constant_pool_[index]->object();
+ }
+
+ bool IsConstantAtIndexSmi(int index) const {
+ return constant_pool_[index]->is_smi();
+ }
+
+ Smi GetConstantAtIndexAsSmi(int index) const {
+ return *(Handle<Smi>::cast(constant_pool_[index]->object()));
+ }
+
+ bool IsSerializedForCompilation() const {
+ return is_serialized_for_compilation_;
+ }
+
+ void SerializeForCompilation(JSHeapBroker* broker) {
+ if (is_serialized_for_compilation_) return;
+
+ Handle<BytecodeArray> bytecode_array =
+ Handle<BytecodeArray>::cast(object());
+
+ DCHECK(bytecodes_.empty());
+ bytecodes_.reserve(bytecode_array->length());
+ for (int i = 0; i < bytecode_array->length(); i++) {
+ bytecodes_.push_back(bytecode_array->get(i));
+ }
+
+ DCHECK(constant_pool_.empty());
+ Handle<FixedArray> constant_pool(bytecode_array->constant_pool(),
+ broker->isolate());
+ constant_pool_.reserve(constant_pool->length());
+ for (int i = 0; i < constant_pool->length(); i++) {
+ constant_pool_.push_back(broker->GetOrCreateData(constant_pool->get(i)));
+ }
+
+ Handle<ByteArray> source_position_table(
+ bytecode_array->SourcePositionTableIfCollected(), broker->isolate());
+ source_positions_.reserve(source_position_table->length());
+ for (int i = 0; i < source_position_table->length(); i++) {
+ source_positions_.push_back(source_position_table->get(i));
+ }
+
+ Handle<ByteArray> handlers(bytecode_array->handler_table(),
+ broker->isolate());
+ handler_table_.reserve(handlers->length());
+ for (int i = 0; i < handlers->length(); i++) {
+ handler_table_.push_back(handlers->get(i));
+ }
+
+ is_serialized_for_compilation_ = true;
+ }
+
+ const byte* source_positions_address() const {
+ return source_positions_.data();
+ }
+
+ size_t source_positions_size() const { return source_positions_.size(); }
+
+ Address handler_table_address() const {
+ CHECK(is_serialized_for_compilation_);
+ return reinterpret_cast<Address>(handler_table_.data());
+ }
+
+ int handler_table_size() const {
+ CHECK(is_serialized_for_compilation_);
+ return static_cast<int>(handler_table_.size());
+ }
+
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
register_count_(object->register_count()),
parameter_count_(object->parameter_count()),
incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()) {}
+ object->incoming_new_target_or_generator_register()),
+ bytecodes_(broker->zone()),
+ source_positions_(broker->zone()),
+ handler_table_(broker->zone()),
+ constant_pool_(broker->zone()) {}
private:
int const register_count_;
int const parameter_count_;
interpreter::Register const incoming_new_target_or_generator_register_;
+
+ bool is_serialized_for_compilation_ = false;
+ ZoneVector<uint8_t> bytecodes_;
+ ZoneVector<uint8_t> source_positions_;
+ ZoneVector<uint8_t> handler_table_;
+ ZoneVector<ObjectData*> constant_pool_;
};
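SerializeForCompilation copies everything the compiler will need later (the bytecodes, the constant pool as ObjectData, the source position table, and the handler table) into zone-allocated vectors, so background compilation can read them without touching the JS heap. A condensed, self-contained sketch of that copy step (illustrative only; HeapBytecode and OffThreadCopy are hypothetical names):

  #include <cstdint>
  #include <vector>

  struct HeapBytecode {                  // Stand-in for the on-heap array.
    std::vector<uint8_t> code;
    std::vector<uint8_t> source_positions;
  };

  struct OffThreadCopy {
    std::vector<uint8_t> code;
    std::vector<uint8_t> source_positions;
    bool serialized = false;

    // One-time copy performed on the main thread; afterwards the compiler
    // thread reads only from this struct.
    void SerializeForCompilation(const HeapBytecode& heap) {
      if (serialized) return;
      code = heap.code;
      source_positions = heap.source_positions;
      serialized = true;
    }
  };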
class JSArrayData : public JSObjectData {
@@ -1377,6 +1567,22 @@ class SharedFunctionInfoData : public HeapObjectData {
void SetSerializedForCompilation(JSHeapBroker* broker,
FeedbackVectorRef feedback);
bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
+ void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
+ FunctionTemplateInfoData* function_template_info() const {
+ return function_template_info_;
+ }
+ JSArrayData* GetTemplateObject(FeedbackSlot slot) const {
+ auto lookup_it = template_objects_.find(slot.ToInt());
+ if (lookup_it != template_objects_.cend()) {
+ return lookup_it->second;
+ }
+ return nullptr;
+ }
+ void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) {
+ CHECK(
+ template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
+ }
+
#define DECL_ACCESSOR(type, name) \
type name() const { return name##_; }
BROKER_SFI_FIELDS(DECL_ACCESSOR)
@@ -1391,6 +1597,8 @@ class SharedFunctionInfoData : public HeapObjectData {
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
+ FunctionTemplateInfoData* function_template_info_;
+ ZoneMap<int, JSArrayData*> template_objects_;
};
SharedFunctionInfoData::SharedFunctionInfoData(
@@ -1408,7 +1616,9 @@ SharedFunctionInfoData::SharedFunctionInfoData(
#define INIT_MEMBER(type, name) , name##_(object->name())
BROKER_SFI_FIELDS(INIT_MEMBER)
#undef INIT_MEMBER
-{
+ ,
+ function_template_info_(nullptr),
+ template_objects_(broker->zone()) {
DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
}
@@ -1420,15 +1630,28 @@ void SharedFunctionInfoData::SetSerializedForCompilation(
<< " as serialized for compilation");
}
+void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
+ JSHeapBroker* broker) {
+ if (function_template_info_) return;
+
+ function_template_info_ =
+ broker
+ ->GetOrCreateData(handle(
+ Handle<SharedFunctionInfo>::cast(object())->function_data(),
+ broker->isolate()))
+ ->AsFunctionTemplateInfo();
+}
+
bool SharedFunctionInfoData::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
return serialized_for_compilation_.find(feedback.object()) !=
serialized_for_compilation_.end();
}
-class ModuleData : public HeapObjectData {
+class SourceTextModuleData : public HeapObjectData {
public:
- ModuleData(JSHeapBroker* broker, ObjectData** storage, Handle<Module> object);
+ SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SourceTextModule> object);
void Serialize(JSHeapBroker* broker);
CellData* GetCell(int cell_index) const;
@@ -1439,35 +1662,36 @@ class ModuleData : public HeapObjectData {
ZoneVector<CellData*> exports_;
};
-ModuleData::ModuleData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Module> object)
+SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<SourceTextModule> object)
: HeapObjectData(broker, storage, object),
imports_(broker->zone()),
exports_(broker->zone()) {}
-CellData* ModuleData::GetCell(int cell_index) const {
+CellData* SourceTextModuleData::GetCell(int cell_index) const {
CHECK(serialized_);
CellData* cell;
- switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
- case ModuleDescriptor::kImport:
- cell = imports_.at(Module::ImportIndex(cell_index));
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
break;
- case ModuleDescriptor::kExport:
- cell = exports_.at(Module::ExportIndex(cell_index));
+ case SourceTextModuleDescriptor::kExport:
+ cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
break;
- case ModuleDescriptor::kInvalid:
+ case SourceTextModuleDescriptor::kInvalid:
UNREACHABLE();
}
CHECK_NOT_NULL(cell);
return cell;
}
-void ModuleData::Serialize(JSHeapBroker* broker) {
+void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
if (serialized_) return;
serialized_ = true;
- TraceScope tracer(broker, this, "ModuleData::Serialize");
- Handle<Module> module = Handle<Module>::cast(object());
+ TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
+ Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
// TODO(neis): We could be smarter and only serialize the cells we care about.
// TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
@@ -1614,7 +1838,7 @@ bool JSObjectData::cow_or_empty_elements_tenured() const {
FixedArrayBaseData* JSObjectData::elements() const { return elements_; }
void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
- SerializeRecursive(broker, kMaxFastLiteralDepth);
+ SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
}
void JSObjectData::SerializeElements(JSHeapBroker* broker) {
@@ -1717,11 +1941,13 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
<< contents.size() << " total)");
}
-void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
+void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
+ int depth) {
if (serialized_as_boilerplate_) return;
serialized_as_boilerplate_ = true;
- TraceScope tracer(broker, this, "JSObjectData::SerializeRecursive");
+ TraceScope tracer(broker, this,
+ "JSObjectData::SerializeRecursiveAsBoilerplate");
Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
// We only serialize boilerplates that pass the IsInlinableFastLiteral
@@ -1767,7 +1993,8 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
ObjectData* value_data = broker->GetOrCreateData(value);
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
}
} else {
@@ -1802,9 +2029,22 @@ void JSObjectData::SerializeRecursive(JSHeapBroker* broker, int depth) {
} else {
Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
isolate);
+ // In case of unboxed double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its unboxed double to a tagged representation. In the process the
+ // raw double is converted to a heap number. The sentinel value carries no
+ // special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value.
+ // We check for the sentinel here, specifically, since migrations might
+ // have been triggered as part of boilerplate serialization.
+ if (value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
+ }
ObjectData* value_data = broker->GetOrCreateData(value);
if (value->IsJSObject()) {
- value_data->AsJSObject()->SerializeRecursive(broker, depth - 1);
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
}
inobject_fields_.push_back(JSObjectField{value_data});
}
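The sentinel check above exists because an uninitialized unboxed-double field holds a dedicated "hole" NaN bit pattern; if the boilerplate migrates to a tagged representation, that pattern lands inside an ordinary HeapNumber and would otherwise be serialized as a regular number. The snippet below shows the general shape of such a bit-pattern test (illustrative only; kHoleSentinelBits is a placeholder, not V8's actual kHoleNanInt64 value):

  #include <cstdint>
  #include <cstring>

  // Placeholder sentinel; the real code compares against kHoleNanInt64.
  constexpr uint64_t kHoleSentinelBits = 0x7FF8000000000001ULL;

  bool IsHoleSentinel(double value) {
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));  // Compare raw bits, not the
    return bits == kHoleSentinelBits;          // double: NaNs never compare equal.
  }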
@@ -1839,35 +2079,50 @@ bool ObjectRef::equals(const ObjectRef& other) const {
Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-ContextRef ContextRef::previous() const {
+ContextRef ContextRef::previous(size_t* depth, bool serialize) const {
+ DCHECK_NOT_NULL(depth);
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
- return ContextRef(broker(),
- handle(object()->previous(), broker()->isolate()));
+ Context current = *object();
+ while (*depth != 0 && current.unchecked_previous().IsContext()) {
+ current = Context::cast(current.unchecked_previous());
+ (*depth)--;
+ }
+ return ContextRef(broker(), handle(current, broker()->isolate()));
}
- return ContextRef(broker(), data()->AsContext()->previous());
+ ContextData* current = this->data()->AsContext();
+ return ContextRef(broker(), current->previous(broker(), depth, serialize));
}
-// Not needed for TypedLowering.
-ObjectRef ContextRef::get(int index) const {
+base::Optional<ObjectRef> ContextRef::get(int index, bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference handle_dereference;
Handle<Object> value(object()->get(index), broker()->isolate());
return ObjectRef(broker(), value);
}
- return ObjectRef(broker(), data()->AsContext()->GetSlot(index));
+ ObjectData* optional_slot =
+ data()->AsContext()->GetSlot(broker(), index, serialize);
+ if (optional_slot != nullptr) {
+ return ObjectRef(broker(), optional_slot);
+ }
+ return base::nullopt;
}
-JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone)
+JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
+ bool tracing_enabled)
: isolate_(isolate),
broker_zone_(broker_zone),
current_zone_(broker_zone),
refs_(new (zone())
RefsMap(kMinimalRefsBucketCount, AddressMatcher(), zone())),
array_and_object_prototypes_(zone()),
- feedback_(zone()) {
+ tracing_enabled_(tracing_enabled),
+ feedback_(zone()),
+ bytecode_analyses_(zone()),
+ ais_for_loading_then_(zone()),
+ ais_for_loading_exec_(zone()) {
// Note that this initialization of the refs_ pointer with the minimal
// initial capacity is redundant in the normal use case (concurrent
// compilation enabled, standard objects to be serialized), as the map
@@ -1939,7 +2194,9 @@ void JSHeapBroker::SerializeShareableObjects() {
{
Builtins::Name builtins[] = {
Builtins::kAllocateInYoungGeneration,
+ Builtins::kAllocateRegularInYoungGeneration,
Builtins::kAllocateInOldGeneration,
+ Builtins::kAllocateRegularInOldGeneration,
Builtins::kArgumentsAdaptorTrampoline,
Builtins::kArrayConstructorImpl,
Builtins::kCallFunctionForwardVarargs,
@@ -2400,6 +2657,11 @@ bool AllocationSiteRef::IsFastLiteral() const {
return data()->AsAllocationSite()->IsFastLiteral();
}
+void JSObjectRef::SerializeElements() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeElements(broker());
+}
+
void JSObjectRef::EnsureElementsTenured() {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation allow_handle_allocation;
@@ -2553,6 +2815,95 @@ double FixedDoubleArrayRef::get_scalar(int i) const {
return data()->AsFixedDoubleArray()->Get(i).get_scalar();
}
+uint8_t BytecodeArrayRef::get(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->get(index);
+ }
+ return data()->AsBytecodeArray()->get(index);
+}
+
+Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->GetFirstBytecodeAddress();
+ }
+ return data()->AsBytecodeArray()->GetFirstBytecodeAddress();
+}
+
+Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return handle(object()->constant_pool().get(index), broker()->isolate());
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndex(index,
+ broker()->isolate());
+}
+
+bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return object()->constant_pool().get(index).IsSmi();
+ }
+ return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
+}
+
+Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ return Smi::cast(object()->constant_pool().get(index));
+ }
+ return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
+}
+
+bool BytecodeArrayRef::IsSerializedForCompilation() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
+ return data()->AsBytecodeArray()->IsSerializedForCompilation();
+}
+
+void BytecodeArrayRef::SerializeForCompilation() {
+ if (broker()->mode() == JSHeapBroker::kDisabled) return;
+ data()->AsBytecodeArray()->SerializeForCompilation(broker());
+}
+
+const byte* BytecodeArrayRef::source_positions_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().GetDataStartAddress();
+ }
+ return data()->AsBytecodeArray()->source_positions_address();
+}
+
+int BytecodeArrayRef::source_positions_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->SourcePositionTableIfCollected().length();
+ }
+ return static_cast<int>(data()->AsBytecodeArray()->source_positions_size());
+}
+
+Address BytecodeArrayRef::handler_table_address() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return reinterpret_cast<Address>(
+ object()->handler_table().GetDataStartAddress());
+ }
+ return data()->AsBytecodeArray()->handler_table_address();
+}
+
+int BytecodeArrayRef::handler_table_size() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ return object()->handler_table().length();
+ }
+ return data()->AsBytecodeArray()->handler_table_size();
+}
+
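All of these BytecodeArrayRef accessors follow the broker's bimodal pattern, which the macro below spells out: with the broker disabled they read straight from the heap inside handle-allocation/dereference scopes, otherwise they answer from the serialized BytecodeArrayData so no heap access is needed off the main thread. Reduced to its essentials (illustrative sketch with simplified, made-up names):

  enum class BrokerMode { kDisabled, kSerialized };

  class LengthRef {
   public:
    int length() const {
      if (mode_ == BrokerMode::kDisabled) {
        return ReadLengthFromHeap();   // Main thread only: direct heap read.
      }
      return cached_length_;           // Background thread: serialized copy.
    }

   private:
    int ReadLengthFromHeap() const { return cached_length_; }  // Stand-in.
    BrokerMode mode_ = BrokerMode::kSerialized;
    int cached_length_ = 0;
  };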
#define IF_BROKER_DISABLED_ACCESS_HANDLE_C(holder, name) \
if (broker()->mode() == JSHeapBroker::kDisabled) { \
AllowHandleAllocation handle_allocation; \
@@ -2630,15 +2981,13 @@ BIMODAL_ACCESSOR_C(JSTypedArray, size_t, length)
BIMODAL_ACCESSOR(JSTypedArray, HeapObject, buffer)
BIMODAL_ACCESSOR_B(Map, bit_field2, elements_kind, Map::ElementsKindBits)
-BIMODAL_ACCESSOR_B(Map, bit_field2, is_extensible, Map::IsExtensibleBit)
-BIMODAL_ACCESSOR_B(Map, bit_field2, has_hidden_prototype,
- Map::HasHiddenPrototypeBit)
-BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_dictionary_map, Map::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_deprecated, Map::IsDeprecatedBit)
BIMODAL_ACCESSOR_B(Map, bit_field3, NumberOfOwnDescriptors,
Map::NumberOfOwnDescriptorsBits)
BIMODAL_ACCESSOR_B(Map, bit_field3, is_migration_target,
Map::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_B(Map, bit_field3, is_extensible, Map::IsExtensibleBit)
BIMODAL_ACCESSOR_B(Map, bit_field, has_prototype_slot, Map::HasPrototypeSlotBit)
BIMODAL_ACCESSOR_B(Map, bit_field, is_access_check_needed,
Map::IsAccessCheckNeededBit)
@@ -2663,7 +3012,109 @@ BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-BIMODAL_ACCESSOR(FunctionTemplateInfo, Object, call_code)
+base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return CallHandlerInfoRef(
+ broker(), handle(object()->call_code(), broker()->isolate()));
+ }
+ CallHandlerInfoData* call_code =
+ data()->AsFunctionTemplateInfo()->call_code();
+ if (!call_code) return base::nullopt;
+ return CallHandlerInfoRef(broker(), call_code);
+}
+
+bool FunctionTemplateInfoRef::is_signature_undefined() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ return object()->signature().IsUndefined(broker()->isolate());
+ }
+ return data()->AsFunctionTemplateInfo()->is_signature_undefined();
+}
+
+bool FunctionTemplateInfoRef::has_call_code() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ return call_optimization.is_simple_api_call();
+ }
+ return data()->AsFunctionTemplateInfo()->has_call_code();
+}
+
+BIMODAL_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
+
+HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
+ MapRef receiver_map, bool serialize) {
+ const HolderLookupResult not_found;
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleDereference allow_handle_dereference;
+ AllowHandleAllocation allow_handle_allocation;
+
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<Map> receiver_map_ref(receiver_map.object());
+ if (!receiver_map_ref->IsJSReceiverMap() ||
+ (receiver_map_ref->is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map_ref, &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound:
+ result.holder = JSObjectRef(broker(), holder);
+ break;
+ default:
+ DCHECK_EQ(result.holder, base::nullopt);
+ break;
+ }
+ return result;
+ }
+
+ FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
+ KnownReceiversMap::iterator lookup_it =
+ fti_data->known_receivers().find(receiver_map.data()->AsMap());
+ if (lookup_it != fti_data->known_receivers().cend()) {
+ return lookup_it->second;
+ }
+ if (!serialize) {
+ TRACE_BROKER_MISSING(broker(),
+ "holder for receiver with map " << receiver_map);
+ return not_found;
+ }
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), not_found});
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ CallOptimization call_optimization(broker()->isolate(), object());
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_map.object(), &result.lookup);
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound: {
+ result.holder = JSObjectRef(broker(), holder);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ break;
+ }
+ default: {
+ DCHECK_EQ(result.holder, base::nullopt);
+ fti_data->known_receivers().insert(
+ {receiver_map.data()->AsMap(), result});
+ }
+ }
+ return result;
+}
BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
@@ -2746,11 +3197,21 @@ bool StringRef::IsSeqString() const {
return data()->AsString()->is_seq_string();
}
+ScopeInfoRef NativeContextRef::scope_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference handle_dereference;
+ return ScopeInfoRef(broker(),
+ handle(object()->scope_info(), broker()->isolate()));
+ }
+ return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
+}
+
MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
if (broker()->mode() == JSHeapBroker::kDisabled) {
- return get(index).AsMap();
+ return get(index).value().AsMap();
}
return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
index - Context::FIRST_FUNCTION_MAP_INDEX));
@@ -2853,6 +3314,19 @@ base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
return ObjectRef(broker(), element);
}
+base::Optional<ObjectRef> JSObjectRef::GetOwnProperty(
+ Representation field_representation, FieldIndex index,
+ bool serialize) const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return GetOwnPropertyFromHeap(broker(), Handle<JSObject>::cast(object()),
+ field_representation, index);
+ }
+ ObjectData* property = data()->AsJSObject()->GetOwnProperty(
+ broker(), field_representation, index, serialize);
+ if (property == nullptr) return base::nullopt;
+ return ObjectRef(broker(), property);
+}
+
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(uint32_t index,
bool serialize) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
@@ -2884,14 +3358,19 @@ double MutableHeapNumberRef::value() const {
return data()->AsMutableHeapNumber()->value();
}
-CellRef ModuleRef::GetCell(int cell_index) const {
+uint64_t BigIntRef::AsUint64() const {
+ IF_BROKER_DISABLED_ACCESS_HANDLE_C(BigInt, AsUint64);
+ return data()->AsBigInt()->AsUint64();
+}
+
+CellRef SourceTextModuleRef::GetCell(int cell_index) const {
if (broker()->mode() == JSHeapBroker::kDisabled) {
AllowHandleAllocation handle_allocation;
AllowHandleDereference allow_handle_dereference;
return CellRef(broker(),
handle(object()->GetCell(cell_index), broker()->isolate()));
}
- return CellRef(broker(), data()->AsModule()->GetCell(cell_index));
+ return CellRef(broker(), data()->AsSourceTextModule()->GetCell(cell_index));
}
ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object)
@@ -3108,6 +3587,8 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
for (int i = first; i <= last; ++i) {
function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
}
+
+ scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo();
}
void JSFunctionRef::Serialize() {
@@ -3133,6 +3614,46 @@ bool JSFunctionRef::IsSerializedForCompilation() const {
shared().IsSerializedForCompilation(feedback_vector());
}
+JSArrayRef SharedFunctionInfoRef::GetTemplateObject(ObjectRef description,
+ FeedbackVectorRef vector,
+ FeedbackSlot slot,
+ bool serialize) {
+ // Look in the feedback vector for the array. A Smi indicates that it's
+ // not yet cached here.
+ ObjectRef candidate = vector.get(slot);
+ if (!candidate.IsSmi()) {
+ return candidate.AsJSArray();
+ }
+
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ AllowHandleAllocation handle_allocation;
+ AllowHandleDereference allow_handle_dereference;
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ return JSArrayRef(broker(), template_object);
+ }
+
+ JSArrayData* array = data()->AsSharedFunctionInfo()->GetTemplateObject(slot);
+ if (array != nullptr) return JSArrayRef(broker(), array);
+
+ CHECK(serialize);
+ CHECK(broker()->SerializingAllowed());
+
+ Handle<TemplateObjectDescription> tod =
+ Handle<TemplateObjectDescription>::cast(description.object());
+ Handle<JSArray> template_object =
+ TemplateObjectDescription::GetTemplateObject(
+ broker()->isolate(), broker()->native_context().object(), tod,
+ object(), slot.ToInt());
+ array = broker()->GetOrCreateData(template_object)->AsJSArray();
+ data()->AsSharedFunctionInfo()->SetTemplateObject(slot, array);
+ return JSArrayRef(broker(), array);
+}
+
void SharedFunctionInfoRef::SetSerializedForCompilation(
FeedbackVectorRef feedback) {
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
@@ -3140,9 +3661,27 @@ void SharedFunctionInfoRef::SetSerializedForCompilation(
feedback);
}
+void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+
+ data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
+}
+
+base::Optional<FunctionTemplateInfoRef>
+SharedFunctionInfoRef::function_template_info() const {
+ if (broker()->mode() == JSHeapBroker::kDisabled) {
+ return FunctionTemplateInfoRef(
+ broker(), handle(object()->function_data(), broker()->isolate()));
+ }
+ FunctionTemplateInfoData* function_template_info =
+ data()->AsSharedFunctionInfo()->function_template_info();
+ if (!function_template_info) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), function_template_info);
+}
+
bool SharedFunctionInfoRef::IsSerializedForCompilation(
FeedbackVectorRef feedback) const {
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
+ if (broker()->mode() == JSHeapBroker::kDisabled) return true;
return data()->AsSharedFunctionInfo()->IsSerializedForCompilation(feedback);
}
@@ -3181,22 +3720,10 @@ bool MapRef::serialized_prototype() const {
return data()->AsMap()->serialized_prototype();
}
-void ModuleRef::Serialize() {
+void SourceTextModuleRef::Serialize() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsModule()->Serialize(broker());
-}
-
-void ContextRef::SerializeContextChain() {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeContextChain(broker());
-}
-
-void ContextRef::SerializeSlot(int index) {
- if (broker()->mode() == JSHeapBroker::kDisabled) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsContext()->SerializeSlot(broker(), index);
+ data()->AsSourceTextModule()->Serialize(broker());
}
void NativeContextRef::Serialize() {
@@ -3228,10 +3755,10 @@ void PropertyCellRef::Serialize() {
data()->AsPropertyCell()->Serialize(broker());
}
-void FunctionTemplateInfoRef::Serialize() {
+void FunctionTemplateInfoRef::SerializeCallCode() {
if (broker()->mode() == JSHeapBroker::kDisabled) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFunctionTemplateInfo()->Serialize(broker());
+ data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
}
base::Optional<PropertyCellRef> JSGlobalProxyRef::GetPropertyCell(
@@ -3307,10 +3834,67 @@ base::Optional<ObjectRef> GlobalAccessFeedback::GetConstantHint() const {
return {};
}
-ElementAccessFeedback::ElementAccessFeedback(Zone* zone)
+KeyedAccessMode KeyedAccessMode::FromNexus(FeedbackNexus const& nexus) {
+ if (IsKeyedLoadICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kLoad, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedHasICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kHas, nexus.GetKeyedAccessLoadMode());
+ }
+ if (IsKeyedStoreICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStore, nexus.GetKeyedAccessStoreMode());
+ }
+ if (IsStoreInArrayLiteralICKind(nexus.kind())) {
+ return KeyedAccessMode(AccessMode::kStoreInLiteral,
+ nexus.GetKeyedAccessStoreMode());
+ }
+ UNREACHABLE();
+}
+
+AccessMode KeyedAccessMode::access_mode() const { return access_mode_; }
+
+bool KeyedAccessMode::IsLoad() const {
+ return access_mode_ == AccessMode::kLoad || access_mode_ == AccessMode::kHas;
+}
+bool KeyedAccessMode::IsStore() const {
+ return access_mode_ == AccessMode::kStore ||
+ access_mode_ == AccessMode::kStoreInLiteral;
+}
+
+KeyedAccessLoadMode KeyedAccessMode::load_mode() const {
+ CHECK(IsLoad());
+ return load_store_mode_.load_mode;
+}
+
+KeyedAccessStoreMode KeyedAccessMode::store_mode() const {
+ CHECK(IsStore());
+ return load_store_mode_.store_mode;
+}
+
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessLoadMode load_mode)
+ : load_mode(load_mode) {}
+KeyedAccessMode::LoadStoreMode::LoadStoreMode(KeyedAccessStoreMode store_mode)
+ : store_mode(store_mode) {}
+
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessLoadMode load_mode)
+ : access_mode_(access_mode), load_store_mode_(load_mode) {
+ CHECK(!IsStore());
+ CHECK(IsLoad());
+}
+KeyedAccessMode::KeyedAccessMode(AccessMode access_mode,
+ KeyedAccessStoreMode store_mode)
+ : access_mode_(access_mode), load_store_mode_(store_mode) {
+ CHECK(!IsLoad());
+ CHECK(IsStore());
+}
+
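KeyedAccessMode pairs the access kind with a single-member union, and the CHECKs in the constructors and accessors ensure that only the union member matching the kind is ever read. A standalone sketch of that discriminated-union shape (illustrative only; plain asserts stand in for V8's CHECK, and the enum values are made up):

  #include <cassert>

  enum class Access { kLoad, kStore };
  enum class LoadMode { kStandard, kHandleOutOfBounds };
  enum class StoreMode { kStandard, kGrowAndHandleCow };

  class KeyedMode {
   public:
    explicit KeyedMode(LoadMode m) : access_(Access::kLoad) { u_.load = m; }
    explicit KeyedMode(StoreMode m) : access_(Access::kStore) { u_.store = m; }

    LoadMode load_mode() const {
      assert(access_ == Access::kLoad);   // Only meaningful for loads.
      return u_.load;
    }
    StoreMode store_mode() const {
      assert(access_ == Access::kStore);  // Only meaningful for stores.
      return u_.store;
    }

   private:
    Access access_;
    union { LoadMode load; StoreMode store; } u_;
  };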
+ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
+ KeyedAccessMode const& keyed_mode)
: ProcessedFeedback(kElementAccess),
receiver_maps(zone),
- transitions(zone) {}
+ transitions(zone),
+ keyed_mode(keyed_mode) {}
ElementAccessFeedback::MapIterator::MapIterator(
ElementAccessFeedback const& processed, JSHeapBroker* broker)
@@ -3383,7 +3967,7 @@ GlobalAccessFeedback const* JSHeapBroker::GetGlobalAccessFeedback(
}
ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps) {
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode) {
DCHECK(!maps.empty());
// Collect possible transition targets.
@@ -3397,7 +3981,8 @@ ElementAccessFeedback const* JSHeapBroker::ProcessFeedbackMapsForElementAccess(
}
}
- ElementAccessFeedback* result = new (zone()) ElementAccessFeedback(zone());
+ ElementAccessFeedback* result =
+ new (zone()) ElementAccessFeedback(zone(), keyed_mode);
// Separate the actual receiver maps and the possible transition sources.
for (Handle<Map> map : maps) {
@@ -3464,7 +4049,7 @@ GlobalAccessFeedback const* JSHeapBroker::ProcessFeedbackForGlobalAccess(
}
ContextRef context_ref(this, context);
if (immutable) {
- context_ref.SerializeSlot(context_slot_index);
+ context_ref.get(context_slot_index, true);
}
return new (zone())
GlobalAccessFeedback(context_ref, context_slot_index, immutable);
@@ -3489,6 +4074,54 @@ base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
return NameRef(this, handle(raw_name, isolate()));
}
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingThen(MapRef map) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ TRACE_BROKER_MISSING(
+ this, "access info for reducing JSResolvePromise with map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+void JSHeapBroker::CreateAccessInfoForLoadingThen(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_then_.find(map);
+ if (access_info == ais_for_loading_then_.end()) {
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ Handle<Name> then_string = isolate()->factory()->then_string();
+ ais_for_loading_then_.insert(
+ std::make_pair(map, access_info_factory.ComputePropertyAccessInfo(
+ map.object(), then_string, AccessMode::kLoad)));
+ }
+}
+
+PropertyAccessInfo JSHeapBroker::GetAccessInfoForLoadingExec(MapRef map) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info == ais_for_loading_exec_.end()) {
+ TRACE_BROKER_MISSING(this,
+ "access info for property 'exec' on map " << map);
+ return PropertyAccessInfo::Invalid(zone());
+ }
+ return access_info->second;
+}
+
+PropertyAccessInfo const& JSHeapBroker::CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies) {
+ auto access_info = ais_for_loading_exec_.find(map);
+ if (access_info != ais_for_loading_exec_.end()) {
+ return access_info->second;
+ }
+
+ ZoneVector<PropertyAccessInfo> access_infos(zone());
+ AccessInfoFactory access_info_factory(this, dependencies, zone());
+ PropertyAccessInfo ai_exec = access_info_factory.ComputePropertyAccessInfo(
+ map.object(), isolate()->factory()->exec_string(), AccessMode::kLoad);
+
+ auto inserted_ai = ais_for_loading_exec_.insert(std::make_pair(map, ai_exec));
+ return inserted_ai.first->second;
+}
+
ElementAccessFeedback const* ProcessedFeedback::AsElementAccess() const {
CHECK_EQ(kElementAccess, kind());
return static_cast<ElementAccessFeedback const*>(this);
@@ -3499,6 +4132,66 @@ NamedAccessFeedback const* ProcessedFeedback::AsNamedAccess() const {
return static_cast<NamedAccessFeedback const*>(this);
}
+BytecodeAnalysis const& JSHeapBroker::GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_bailout_id,
+ bool analyze_liveness, bool serialize) {
+ ObjectData* bytecode_array_data = GetData(bytecode_array);
+ CHECK_NOT_NULL(bytecode_array_data);
+
+ auto it = bytecode_analyses_.find(bytecode_array_data);
+ if (it != bytecode_analyses_.end()) {
+ // Bytecode analysis can be run for OSR or for non-OSR. In the rare case
+ // where we optimize for OSR and consider the top-level function itself for
+ // inlining (because of recursion), we need both the OSR and the non-OSR
+ // analysis. Fortunately, the only difference between the two lies in
+ // whether the OSR entry offset gets computed (from the OSR bailout id).
+ // Hence it's okay to reuse the OSR-version when asked for the non-OSR
+ // version, such that we need to store at most one analysis result per
+ // bytecode array.
+ CHECK_IMPLIES(osr_bailout_id != it->second->osr_bailout_id(),
+ osr_bailout_id.IsNone());
+ CHECK_EQ(analyze_liveness, it->second->liveness_analyzed());
+ return *it->second;
+ }
+
+ CHECK(serialize);
+ BytecodeAnalysis* analysis = new (zone()) BytecodeAnalysis(
+ bytecode_array, zone(), osr_bailout_id, analyze_liveness);
+ DCHECK_EQ(analysis->osr_bailout_id(), osr_bailout_id);
+ bytecode_analyses_[bytecode_array_data] = analysis;
+ return *analysis;
+}
+
+OffHeapBytecodeArray::OffHeapBytecodeArray(BytecodeArrayRef bytecode_array)
+ : array_(bytecode_array) {}
+
+int OffHeapBytecodeArray::length() const { return array_.length(); }
+
+int OffHeapBytecodeArray::parameter_count() const {
+ return array_.parameter_count();
+}
+
+uint8_t OffHeapBytecodeArray::get(int index) const { return array_.get(index); }
+
+void OffHeapBytecodeArray::set(int index, uint8_t value) { UNREACHABLE(); }
+
+Address OffHeapBytecodeArray::GetFirstBytecodeAddress() const {
+ return array_.GetFirstBytecodeAddress();
+}
+
+Handle<Object> OffHeapBytecodeArray::GetConstantAtIndex(
+ int index, Isolate* isolate) const {
+ return array_.GetConstantAtIndex(index);
+}
+
+bool OffHeapBytecodeArray::IsConstantAtIndexSmi(int index) const {
+ return array_.IsConstantAtIndexSmi(index);
+}
+
+Smi OffHeapBytecodeArray::GetConstantAtIndexAsSmi(int index) const {
+ return array_.GetConstantAtIndexAsSmi(index);
+}
+
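OffHeapBytecodeArray adapts the broker's serialized copy to an abstract bytecode-array interface, so the bytecode can be iterated on the compiler thread without dereferencing heap objects. The adapter idea, reduced to a standalone sketch (illustrative only; AbstractBytes and VectorBytes are hypothetical names):

  #include <cstdint>
  #include <utility>
  #include <vector>

  // Minimal stand-in for the abstract interface an iterator would consume.
  class AbstractBytes {
   public:
    virtual ~AbstractBytes() = default;
    virtual int length() const = 0;
    virtual uint8_t get(int index) const = 0;
  };

  // Adapter over an owned, off-heap copy of the bytecodes.
  class VectorBytes final : public AbstractBytes {
   public:
    explicit VectorBytes(std::vector<uint8_t> bytes) : bytes_(std::move(bytes)) {}
    int length() const override { return static_cast<int>(bytes_.size()); }
    uint8_t get(int index) const override { return bytes_[index]; }

   private:
    std::vector<uint8_t> bytes_;
  };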
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 2c4cc766bc..ffc10d2b93 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -8,796 +8,24 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
#include "src/common/globals.h"
+#include "src/compiler/access-info.h"
#include "src/compiler/refs-map.h"
#include "src/handles/handles.h"
+#include "src/interpreter/bytecode-array-accessor.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/function-kind.h"
-#include "src/objects/instance-type.h"
#include "src/objects/objects.h"
#include "src/utils/ostreams.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-
-class BytecodeArray;
-class CallHandlerInfo;
-class FixedDoubleArray;
-class FunctionTemplateInfo;
-class HeapNumber;
-class InternalizedString;
-class JSBoundFunction;
-class JSDataView;
-class JSGlobalProxy;
-class JSRegExp;
-class JSTypedArray;
-class NativeContext;
-class ScriptContextTable;
-class VectorSlotPair;
-
namespace compiler {
-// Whether we are loading a property or storing to a property.
-// For a store during literal creation, do not walk up the prototype chain.
-enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
-
-enum class OddballType : uint8_t {
- kNone, // Not an Oddball.
- kBoolean, // True or False.
- kUndefined,
- kNull,
- kHole,
- kUninitialized,
- kOther // Oddball, but none of the above.
-};
-
-// This list is sorted such that subtypes appear before their supertypes.
-// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray) \
- V(JSBoundFunction) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSGlobalProxy) \
- V(JSRegExp) \
- V(JSTypedArray) \
- /* Subtypes of Context */ \
- V(NativeContext) \
- /* Subtypes of FixedArray */ \
- V(Context) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- /* Subtypes of Name */ \
- V(InternalizedString) \
- V(String) \
- V(Symbol) \
- /* Subtypes of HeapObject */ \
- V(AllocationSite) \
- V(CallHandlerInfo) \
- V(Cell) \
- V(Code) \
- V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArrayBase) \
- V(FunctionTemplateInfo) \
- V(HeapNumber) \
- V(JSObject) \
- V(Map) \
- V(Module) \
- V(MutableHeapNumber) \
- V(Name) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
- /* Subtypes of Object */ \
- V(HeapObject)
-
-class CompilationDependencies;
-class JSHeapBroker;
-class ObjectData;
-class PerIsolateCompilerCache;
-class PropertyAccessInfo;
-#define FORWARD_DECL(Name) class Name##Ref;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
-
-class V8_EXPORT_PRIVATE ObjectRef {
- public:
- ObjectRef(JSHeapBroker* broker, Handle<Object> object);
- ObjectRef(JSHeapBroker* broker, ObjectData* data)
- : data_(data), broker_(broker) {
- CHECK_NOT_NULL(data_);
- }
-
- Handle<Object> object() const;
-
- bool equals(const ObjectRef& other) const;
-
- bool IsSmi() const;
- int AsSmi() const;
-
-#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
-#undef HEAP_IS_METHOD_DECL
-
-#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
-#undef HEAP_AS_METHOD_DECL
-
- bool IsNullOrUndefined() const;
-
- bool BooleanValue() const;
- Maybe<double> OddballToNumber() const;
-
- // Return the element at key {index} if {index} is known to be an own data
- // property of the object that is non-writable and non-configurable.
- base::Optional<ObjectRef> GetOwnConstantElement(uint32_t index,
- bool serialize = false) const;
-
- Isolate* isolate() const;
-
- protected:
- JSHeapBroker* broker() const;
- ObjectData* data() const;
- ObjectData* data_; // Should be used only by object() getters.
-
- private:
- friend class JSArrayData;
- friend class JSGlobalProxyRef;
- friend class JSGlobalProxyData;
- friend class JSObjectData;
- friend class StringData;
-
- friend std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-
- JSHeapBroker* broker_;
-};
-
+class BytecodeAnalysis;
+class ObjectRef;
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref);
-// Temporary class that carries information from a Map. We'd like to remove
-// this class and use MapRef instead, but we can't as long as we support the
-// kDisabled broker mode. That's because obtaining the MapRef via
-// HeapObjectRef::map() requires a HandleScope when the broker is disabled.
-// During OptimizeGraph we generally don't have a HandleScope, however. There
-// are two places where we therefore use GetHeapObjectType() instead. Both that
-// function and this class should eventually be removed.
-class HeapObjectType {
- public:
- enum Flag : uint8_t { kUndetectable = 1 << 0, kCallable = 1 << 1 };
-
- using Flags = base::Flags<Flag>;
-
- HeapObjectType(InstanceType instance_type, Flags flags,
- OddballType oddball_type)
- : instance_type_(instance_type),
- oddball_type_(oddball_type),
- flags_(flags) {
- DCHECK_EQ(instance_type == ODDBALL_TYPE,
- oddball_type != OddballType::kNone);
- }
-
- OddballType oddball_type() const { return oddball_type_; }
- InstanceType instance_type() const { return instance_type_; }
- Flags flags() const { return flags_; }
-
- bool is_callable() const { return flags_ & kCallable; }
- bool is_undetectable() const { return flags_ & kUndetectable; }
-
- private:
- InstanceType const instance_type_;
- OddballType const oddball_type_;
- Flags const flags_;
-};
-
-class HeapObjectRef : public ObjectRef {
- public:
- using ObjectRef::ObjectRef;
- Handle<HeapObject> object() const;
-
- MapRef map() const;
-
- // See the comment on the HeapObjectType class.
- HeapObjectType GetHeapObjectType() const;
-};
-
-class PropertyCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<PropertyCell> object() const;
-
- PropertyDetails property_details() const;
-
- void Serialize();
- ObjectRef value() const;
-};
-
-class JSObjectRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<JSObject> object() const;
-
- uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index) const;
- double RawFastDoublePropertyAt(FieldIndex index) const;
- ObjectRef RawFastPropertyAt(FieldIndex index) const;
-
- FixedArrayBaseRef elements() const;
- void EnsureElementsTenured();
- ElementsKind GetElementsKind() const;
-
- void SerializeObjectCreateMap();
- base::Optional<MapRef> GetObjectCreateMap() const;
-};
-
-class JSDataViewRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSDataView> object() const;
-
- size_t byte_length() const;
- size_t byte_offset() const;
-};
-
-class JSBoundFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSBoundFunction> object() const;
-
- void Serialize();
-
- // The following are available only after calling Serialize().
- ObjectRef bound_target_function() const;
- ObjectRef bound_this() const;
- FixedArrayRef bound_arguments() const;
-};
-
-class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSFunction> object() const;
-
- bool has_feedback_vector() const;
- bool has_initial_map() const;
- bool has_prototype() const;
- bool PrototypeRequiresRuntimeLookup() const;
-
- void Serialize();
- bool serialized() const;
-
- // The following are available only after calling Serialize().
- ObjectRef prototype() const;
- MapRef initial_map() const;
- ContextRef context() const;
- NativeContextRef native_context() const;
- SharedFunctionInfoRef shared() const;
- FeedbackVectorRef feedback_vector() const;
- int InitialMapInstanceSizeWithMinSlack() const;
-
- bool IsSerializedForCompilation() const;
-};
-
-class JSRegExpRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSRegExp> object() const;
-
- ObjectRef raw_properties_or_hash() const;
- ObjectRef data() const;
- ObjectRef source() const;
- ObjectRef flags() const;
- ObjectRef last_index() const;
-};
-
-class HeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<HeapNumber> object() const;
-
- double value() const;
-};
-
-class MutableHeapNumberRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<MutableHeapNumber> object() const;
-
- double value() const;
-};
-
-class ContextRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Context> object() const;
-
- void SerializeContextChain();
- ContextRef previous() const;
-
- void SerializeSlot(int index);
- ObjectRef get(int index) const;
-};
-
-#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- V(JSFunction, array_function) \
- V(JSFunction, boolean_function) \
- V(JSFunction, bigint_function) \
- V(JSFunction, number_function) \
- V(JSFunction, object_function) \
- V(JSFunction, promise_function) \
- V(JSFunction, promise_then) \
- V(JSFunction, string_function) \
- V(JSFunction, symbol_function) \
- V(JSGlobalProxy, global_proxy_object) \
- V(JSObject, promise_prototype) \
- V(Map, bound_function_with_constructor_map) \
- V(Map, bound_function_without_constructor_map) \
- V(Map, fast_aliased_arguments_map) \
- V(Map, initial_array_iterator_map) \
- V(Map, initial_string_iterator_map) \
- V(Map, iterator_result_map) \
- V(Map, js_array_holey_double_elements_map) \
- V(Map, js_array_holey_elements_map) \
- V(Map, js_array_holey_smi_elements_map) \
- V(Map, js_array_packed_double_elements_map) \
- V(Map, js_array_packed_elements_map) \
- V(Map, js_array_packed_smi_elements_map) \
- V(Map, sloppy_arguments_map) \
- V(Map, slow_object_with_null_prototype_map) \
- V(Map, strict_arguments_map) \
- V(ScriptContextTable, script_context_table) \
- V(SharedFunctionInfo, promise_capability_default_reject_shared_fun) \
- V(SharedFunctionInfo, promise_catch_finally_shared_fun) \
- V(SharedFunctionInfo, promise_then_finally_shared_fun) \
- V(SharedFunctionInfo, promise_capability_default_resolve_shared_fun)
-
-// Those are set by Bootstrapper::ExportFromRuntime, which may not yet have
-// happened when Turbofan is invoked via --always-opt.
-#define BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V) \
- V(Map, async_function_object_map) \
- V(Map, map_key_iterator_map) \
- V(Map, map_key_value_iterator_map) \
- V(Map, map_value_iterator_map) \
- V(Map, set_key_value_iterator_map) \
- V(Map, set_value_iterator_map)
-
-#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(V)
-
-class NativeContextRef : public ContextRef {
- public:
- using ContextRef::ContextRef;
- Handle<NativeContext> object() const;
-
- void Serialize();
-
-#define DECL_ACCESSOR(type, name) type##Ref name() const;
- BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- MapRef GetFunctionMapFromIndex(int index) const;
- MapRef GetInitialJSArrayMap(ElementsKind kind) const;
- base::Optional<JSFunctionRef> GetConstructorFunction(const MapRef& map) const;
-};
-
-class NameRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Name> object() const;
-
- bool IsUniqueName() const;
-};
-
-class ScriptContextTableRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScriptContextTable> object() const;
-
- struct LookupResult {
- ContextRef context;
- bool immutable;
- int index;
- };
-
- base::Optional<LookupResult> lookup(const NameRef& name) const;
-};
-
-class DescriptorArrayRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<DescriptorArray> object() const;
-};
-
-class FeedbackCellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackCell> object() const;
-
- HeapObjectRef value() const;
-};
-
-class FeedbackVectorRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FeedbackVector> object() const;
-
- ObjectRef get(FeedbackSlot slot) const;
-
- void SerializeSlots();
-};
-
-class FunctionTemplateInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FunctionTemplateInfo> object() const;
-
- void Serialize();
- ObjectRef call_code() const;
-};
-
-class CallHandlerInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<CallHandlerInfo> object() const;
-
- Address callback() const;
-
- void Serialize();
- ObjectRef data() const;
-};
-
-class AllocationSiteRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<AllocationSite> object() const;
-
- bool PointsToLiteral() const;
- AllocationType GetAllocationType() const;
- ObjectRef nested_site() const;
-
- // {IsFastLiteral} determines whether the given array or object literal
- // boilerplate satisfies all limits to be considered for fast deep-copying
- // and computes the total size of all objects that are part of the graph.
- //
- // If PointsToLiteral() is false, then IsFastLiteral() is also false.
- bool IsFastLiteral() const;
- // We only serialize boilerplate if IsFastLiteral is true.
- base::Optional<JSObjectRef> boilerplate() const;
-
- ElementsKind GetElementsKind() const;
- bool CanInlineCall() const;
-};
-
-class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Map> object() const;
-
- int instance_size() const;
- InstanceType instance_type() const;
- int GetInObjectProperties() const;
- int GetInObjectPropertiesStartInWords() const;
- int NumberOfOwnDescriptors() const;
- int GetInObjectPropertyOffset(int index) const;
- int constructor_function_index() const;
- int NextFreePropertyIndex() const;
- int UnusedPropertyFields() const;
- ElementsKind elements_kind() const;
- bool is_stable() const;
- bool is_extensible() const;
- bool is_constructor() const;
- bool has_prototype_slot() const;
- bool is_access_check_needed() const;
- bool is_deprecated() const;
- bool CanBeDeprecated() const;
- bool CanTransition() const;
- bool IsInobjectSlackTrackingInProgress() const;
- bool is_dictionary_map() const;
- bool IsFixedCowArrayMap() const;
- bool IsPrimitiveMap() const;
- bool is_undetectable() const;
- bool is_callable() const;
- bool has_indexed_interceptor() const;
- bool has_hidden_prototype() const;
- bool is_migration_target() const;
- bool supports_fast_array_iteration() const;
- bool supports_fast_array_resize() const;
- bool IsMapOfCurrentGlobalProxy() const;
-
- OddballType oddball_type() const;
-
-#define DEF_TESTER(Type, ...) bool Is##Type##Map() const;
- INSTANCE_TYPE_CHECKERS(DEF_TESTER)
-#undef DEF_TESTER
-
- void SerializeBackPointer();
- HeapObjectRef GetBackPointer() const;
-
- void SerializePrototype();
- bool serialized_prototype() const;
- HeapObjectRef prototype() const;
-
- void SerializeForElementLoad();
-
- void SerializeForElementStore();
- bool HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps);
-
- // Concerning the underlying instance_descriptors:
- void SerializeOwnDescriptors();
- void SerializeOwnDescriptor(int descriptor_index);
- MapRef FindFieldOwner(int descriptor_index) const;
- PropertyDetails GetPropertyDetails(int descriptor_index) const;
- NameRef GetPropertyKey(int descriptor_index) const;
- FieldIndex GetFieldIndexFor(int descriptor_index) const;
- ObjectRef GetFieldType(int descriptor_index) const;
- bool IsUnboxedDoubleField(int descriptor_index) const;
-
- // Available after calling JSFunctionRef::Serialize on a function that has
- // this map as initial map.
- ObjectRef GetConstructor() const;
- base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
-};
-
-class FixedArrayBaseRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<FixedArrayBase> object() const;
-
- int length() const;
-};
-
-class FixedArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedArray> object() const;
-
- ObjectRef get(int i) const;
-};
-
-class FixedDoubleArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<FixedDoubleArray> object() const;
-
- double get_scalar(int i) const;
- bool is_the_hole(int i) const;
-};
-
-class BytecodeArrayRef : public FixedArrayBaseRef {
- public:
- using FixedArrayBaseRef::FixedArrayBaseRef;
- Handle<BytecodeArray> object() const;
-
- int register_count() const;
- int parameter_count() const;
- interpreter::Register incoming_new_target_or_generator_register() const;
-};
-
-class JSArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSArray> object() const;
-
- ObjectRef length() const;
-
- // Return the element at key {index} if the array has a copy-on-write elements
- // storage and {index} is known to be an own data property.
- base::Optional<ObjectRef> GetOwnCowElement(uint32_t index,
- bool serialize = false) const;
-};
-
-class ScopeInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ScopeInfo> object() const;
-
- int ContextLength() const;
-};
-
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(bool, is_safe_to_skip_arguments_adaptor) \
- V(bool, IsInlineable) \
- V(bool, is_compiled)
-
-class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<SharedFunctionInfo> object() const;
-
- int builtin_id() const;
- BytecodeArrayRef GetBytecodeArray() const;
-
-#define DECL_ACCESSOR(type, name) type name() const;
- BROKER_SFI_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- bool IsSerializedForCompilation(FeedbackVectorRef feedback) const;
- void SetSerializedForCompilation(FeedbackVectorRef feedback);
-};
-
-class StringRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<String> object() const;
-
- int length() const;
- uint16_t GetFirstChar();
- base::Optional<double> ToNumber();
- bool IsSeqString() const;
- bool IsExternalString() const;
-};
-
-class SymbolRef : public NameRef {
- public:
- using NameRef::NameRef;
- Handle<Symbol> object() const;
-};
-
-class JSTypedArrayRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSTypedArray> object() const;
-
- bool is_on_heap() const;
- size_t length() const;
- void* external_pointer() const;
-
- void Serialize();
- bool serialized() const;
-
- HeapObjectRef buffer() const;
-};
-
-class ModuleRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Module> object() const;
-
- void Serialize();
-
- CellRef GetCell(int cell_index) const;
-};
-
-class CellRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Cell> object() const;
-
- ObjectRef value() const;
-};
-
-class JSGlobalProxyRef : public JSObjectRef {
- public:
- using JSObjectRef::JSObjectRef;
- Handle<JSGlobalProxy> object() const;
-
- // If {serialize} is false:
- // If the property is known to exist as a property cell (on the global
- // object), return that property cell. Otherwise (not known to exist as a
- // property cell or known not to exist as a property cell) return nothing.
- // If {serialize} is true:
- // Like above but potentially access the heap and serialize the necessary
- // information.
- base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name,
- bool serialize = false) const;
-};
-
-class CodeRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<Code> object() const;
-};
-
-class InternalizedStringRef : public StringRef {
- public:
- using StringRef::StringRef;
- Handle<InternalizedString> object() const;
-};
-
-class ElementAccessFeedback;
-class NamedAccessFeedback;
-
-class ProcessedFeedback : public ZoneObject {
- public:
- enum Kind { kInsufficient, kGlobalAccess, kNamedAccess, kElementAccess };
- Kind kind() const { return kind_; }
-
- ElementAccessFeedback const* AsElementAccess() const;
- NamedAccessFeedback const* AsNamedAccess() const;
-
- protected:
- explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
-
- private:
- Kind const kind_;
-};
-
-class InsufficientFeedback final : public ProcessedFeedback {
- public:
- InsufficientFeedback();
-};
-
-class GlobalAccessFeedback : public ProcessedFeedback {
- public:
- explicit GlobalAccessFeedback(PropertyCellRef cell);
- GlobalAccessFeedback(ContextRef script_context, int slot_index,
- bool immutable);
-
- bool IsPropertyCell() const;
- PropertyCellRef property_cell() const;
-
- bool IsScriptContextSlot() const { return !IsPropertyCell(); }
- ContextRef script_context() const;
- int slot_index() const;
- bool immutable() const;
-
- base::Optional<ObjectRef> GetConstantHint() const;
-
- private:
- ObjectRef const cell_or_context_;
- int const index_and_immutable_;
-};
-
-class ElementAccessFeedback : public ProcessedFeedback {
- public:
- explicit ElementAccessFeedback(Zone* zone);
-
- // No transition sources appear in {receiver_maps}.
- // All transition targets appear in {receiver_maps}.
- ZoneVector<Handle<Map>> receiver_maps;
- ZoneVector<std::pair<Handle<Map>, Handle<Map>>> transitions;
-
- class MapIterator {
- public:
- bool done() const;
- void advance();
- MapRef current() const;
-
- private:
- friend class ElementAccessFeedback;
-
- explicit MapIterator(ElementAccessFeedback const& processed,
- JSHeapBroker* broker);
-
- ElementAccessFeedback const& processed_;
- JSHeapBroker* const broker_;
- size_t index_ = 0;
- };
-
- // Iterator over all maps: first {receiver_maps}, then transition sources.
- MapIterator all_maps(JSHeapBroker* broker) const;
-};
-
-class NamedAccessFeedback : public ProcessedFeedback {
- public:
- NamedAccessFeedback(NameRef const& name,
- ZoneVector<PropertyAccessInfo> const& access_infos);
-
- NameRef const& name() const { return name_; }
- ZoneVector<PropertyAccessInfo> const& access_infos() const {
- return access_infos_;
- }
-
- private:
- NameRef const name_;
- ZoneVector<PropertyAccessInfo> const access_infos_;
-};
-
struct FeedbackSource {
FeedbackSource(Handle<FeedbackVector> vector_, FeedbackSlot slot_)
: vector(vector_), slot(slot_) {}
@@ -821,26 +49,28 @@ struct FeedbackSource {
};
};
-#define TRACE_BROKER(broker, x) \
- do { \
- if (FLAG_trace_heap_broker_verbose) broker->Trace() << x << '\n'; \
+#define TRACE_BROKER(broker, x) \
+ do { \
+ if (broker->tracing_enabled() && FLAG_trace_heap_broker_verbose) \
+ broker->Trace() << x << '\n'; \
} while (false)
#define TRACE_BROKER_MISSING(broker, x) \
do { \
- if (FLAG_trace_heap_broker) \
+ if (broker->tracing_enabled()) \
broker->Trace() << __FUNCTION__ << ": missing " << x << '\n'; \
} while (false)
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
- JSHeapBroker(Isolate* isolate, Zone* broker_zone);
+ JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled);
void SetNativeContextRef();
void SerializeStandardObjects();
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return current_zone_; }
+ bool tracing_enabled() const { return tracing_enabled_; }
NativeContextRef native_context() const { return native_context_.value(); }
PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
@@ -875,12 +105,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- MapHandles const& maps);
+ MapHandles const& maps, KeyedAccessMode const& keyed_mode);
GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(
FeedbackSource const& source);
+ BytecodeAnalysis const& GetBytecodeAnalysis(
+ Handle<BytecodeArray> bytecode_array, BailoutId osr_offset,
+ bool analyze_liveness, bool serialize);
+
base::Optional<NameRef> GetNameFeedback(FeedbackNexus const& nexus);
+ // If there is no result stored for {map}, we return an Invalid
+ // PropertyAccessInfo.
+ PropertyAccessInfo GetAccessInfoForLoadingThen(MapRef map);
+ void CreateAccessInfoForLoadingThen(MapRef map,
+ CompilationDependencies* dependencies);
+ PropertyAccessInfo GetAccessInfoForLoadingExec(MapRef map);
+ PropertyAccessInfo const& CreateAccessInfoForLoadingExec(
+ MapRef map, CompilationDependencies* dependencies);
+
std::ostream& Trace();
void IncrementTracingIndentation();
void DecrementTracingIndentation();
@@ -902,12 +145,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Handle<JSObject>::equal_to>
array_and_object_prototypes_;
BrokerMode mode_ = kDisabled;
+ bool const tracing_enabled_;
StdoutStream trace_out_;
unsigned trace_indentation_ = 0;
PerIsolateCompilerCache* compiler_cache_;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
+ ZoneUnorderedMap<ObjectData*, BytecodeAnalysis*> bytecode_analyses_;
+ typedef ZoneUnorderedMap<MapRef, PropertyAccessInfo, ObjectRef::Hash,
+ ObjectRef::Equal>
+ MapToAccessInfos;
+ MapToAccessInfos ais_for_loading_then_;
+ MapToAccessInfos ais_for_loading_exec_;
static const size_t kMinimalRefsBucketCount = 8; // must be power of 2
static const size_t kInitialRefsBucketCount = 1024; // must be power of 2
@@ -948,6 +198,23 @@ Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
// compilation is finished.
bool CanInlineElementAccess(MapRef const& map);
+class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
+ public:
+ explicit OffHeapBytecodeArray(BytecodeArrayRef bytecode_array);
+
+ int length() const override;
+ int parameter_count() const override;
+ uint8_t get(int index) const override;
+ void set(int index, uint8_t value) override;
+ Address GetFirstBytecodeAddress() const override;
+ Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const override;
+ bool IsConstantAtIndexSmi(int index) const override;
+ Smi GetConstantAtIndexAsSmi(int index) const override;
+
+ private:
+ BytecodeArrayRef array_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
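
OffHeapBytecodeArray is an adapter: it implements the interpreter's abstract bytecode accessor on top of a BytecodeArrayRef, so bytecode can be walked without touching heap objects directly. A self-contained sketch of the same adapter idea, with illustrative names (AbstractBytecode, OffHeapBytecode) rather than the real interfaces:

    // Sketch: a read-only accessor interface that iteration code depends on,
    // plus an implementation backed by an off-heap copy of the bytecodes.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    class AbstractBytecode {
     public:
      virtual ~AbstractBytecode() = default;
      virtual int length() const = 0;
      virtual uint8_t get(int index) const = 0;
    };

    class OffHeapBytecode final : public AbstractBytecode {
     public:
      explicit OffHeapBytecode(std::vector<uint8_t> bytes)
          : bytes_(std::move(bytes)) {}
      int length() const override { return static_cast<int>(bytes_.size()); }
      uint8_t get(int index) const override { return bytes_.at(index); }

     private:
      std::vector<uint8_t> bytes_;  // snapshot, no heap access needed afterwards
    };

    // Iteration code only sees the abstract interface.
    int CountOpcode(const AbstractBytecode& code, uint8_t opcode) {
      int count = 0;
      for (int i = 0; i < code.length(); ++i) {
        if (code.get(i) == opcode) ++count;
      }
      return count;
    }

    int main() {
      OffHeapBytecode code({0x01, 0x02, 0x01});
      std::cout << CountOpcode(code, 0x01) << "\n";  // prints 2
    }
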
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index cc48ae80cb..7e7c9e3a0e 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -30,8 +30,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
ObjectRef object(broker(), HeapConstantOf(node->op()));
if (object.IsJSFunction()) object.AsJSFunction().Serialize();
if (object.IsJSObject()) object.AsJSObject().SerializeObjectCreateMap();
- if (object.IsModule()) object.AsModule().Serialize();
- if (object.IsContext()) object.AsContext().SerializeContextChain();
+ if (object.IsSourceTextModule()) object.AsSourceTextModule().Serialize();
break;
}
case IrOpcode::kJSCreateArray: {
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index f78635b139..e11d6b59a3 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -7,6 +7,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects/objects-inl.h"
@@ -21,15 +22,9 @@ namespace compiler {
} while (false)
namespace {
-
-bool IsSmallInlineFunction(BytecodeArrayRef bytecode) {
- // Forcibly inline small functions.
- if (bytecode.length() <= FLAG_max_inlined_bytecode_size_small) {
- return true;
- }
- return false;
+bool IsSmall(BytecodeArrayRef bytecode) {
+ return bytecode.length() <= FLAG_max_inlined_bytecode_size_small;
}
-
} // namespace
JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
@@ -65,7 +60,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
out.functions[n] = m.Ref(broker()).AsJSFunction();
JSFunctionRef function = out.functions[n].value();
if (function.IsSerializedForCompilation()) {
- out.bytecode[n] = function.shared().GetBytecodeArray(), isolate();
+ out.bytecode[n] = function.shared().GetBytecodeArray();
}
}
out.num_functions = value_input_count;
@@ -91,6 +86,11 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+ if (total_inlined_bytecode_size_ >= FLAG_max_inlined_bytecode_size_absolute &&
+ mode_ != kStressInlining) {
+ return NoChange();
+ }
+
// Check if we already saw that {node} before, and if so, just skip it.
if (seen_.find(node->id()) != seen_.end()) return NoChange();
seen_.insert(node->id());
@@ -107,7 +107,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
return NoChange();
}
- bool can_inline = false, force_inline_small = true;
+ bool can_inline_candidate = false, candidate_is_small = true;
candidate.total_size = 0;
Node* frame_state = NodeProperties::GetFrameStateInput(node);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
@@ -155,15 +155,12 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
// serialized.
BytecodeArrayRef bytecode = candidate.bytecode[i].value();
if (candidate.can_inline_function[i]) {
- can_inline = true;
+ can_inline_candidate = true;
candidate.total_size += bytecode.length();
}
- // We don't force inline small functions if any of them is not inlineable.
- if (!IsSmallInlineFunction(bytecode)) {
- force_inline_small = false;
- }
+ candidate_is_small = candidate_is_small && IsSmall(bytecode);
}
- if (!can_inline) return NoChange();
+ if (!can_inline_candidate) return NoChange();
// Gather feedback on how often this call site has been hit before.
if (node->opcode() == IrOpcode::kJSCall) {
@@ -195,9 +192,8 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Forcibly inline small functions here. In the case of polymorphic inlining
- // force_inline_small is set only when all functions are small.
- if (force_inline_small &&
- cumulative_count_ < FLAG_max_inlined_bytecode_size_absolute) {
+ // candidate_is_small is set only when all functions are small.
+ if (candidate_is_small) {
TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
node->op()->mnemonic());
return InlineCandidate(candidate, true);
@@ -221,21 +217,24 @@ void JSInliningHeuristic::Finalize() {
Candidate candidate = *i;
candidates_.erase(i);
+ // Make sure we don't try to inline dead candidate nodes.
+ if (candidate.node->IsDead()) {
+ continue;
+ }
+
// Make sure we have some extra budget left, so that any small functions
// exposed by this function would be given a chance to inline.
double size_of_candidate =
candidate.total_size * FLAG_reserve_inline_budget_scale_factor;
- int total_size = cumulative_count_ + static_cast<int>(size_of_candidate);
+ int total_size =
+ total_inlined_bytecode_size_ + static_cast<int>(size_of_candidate);
if (total_size > FLAG_max_inlined_bytecode_size_cumulative) {
// Try if any smaller functions are available to inline.
continue;
}
- // Make sure we don't try to inline dead candidate nodes.
- if (!candidate.node->IsDead()) {
- Reduction const reduction = InlineCandidate(candidate, false);
- if (reduction.Changed()) return;
- }
+ Reduction const reduction = InlineCandidate(candidate, false);
+ if (reduction.Changed()) return;
}
}
@@ -630,7 +629,7 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
if (num_calls == 1) {
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
- cumulative_count_ += candidate.bytecode[0].value().length();
+ total_inlined_bytecode_size_ += candidate.bytecode[0].value().length();
}
return reduction;
}
@@ -688,20 +687,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
ReplaceWithValue(node, value, effect, control);
// Inline the individual, cloned call sites.
- for (int i = 0; i < num_calls; ++i) {
- Node* node = calls[i];
+ for (int i = 0; i < num_calls && total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_absolute;
+ ++i) {
if (candidate.can_inline_function[i] &&
- (small_function ||
- cumulative_count_ < FLAG_max_inlined_bytecode_size_cumulative)) {
+ (small_function || total_inlined_bytecode_size_ <
+ FLAG_max_inlined_bytecode_size_cumulative)) {
+ Node* node = calls[i];
Reduction const reduction = inliner_.ReduceJSCall(node);
if (reduction.Changed()) {
+ total_inlined_bytecode_size_ += candidate.bytecode[i]->length();
// Killing the call node is not strictly necessary, but it is safer to
// make sure we do not resurrect the node.
node->Kill();
- // Small functions don't count towards the budget.
- if (!small_function) {
- cumulative_count_ += candidate.bytecode[i]->length();
- }
}
}
}
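
The heuristic above keeps a running total_inlined_bytecode_size_ and checks it against absolute and cumulative limits; small candidates are inlined eagerly but still counted. One way to picture that budgeting, as a standalone sketch whose constants are made-up stand-ins for FLAG_max_inlined_bytecode_size_absolute and FLAG_max_inlined_bytecode_size_cumulative:

    // Sketch of a cumulative inlining budget (values and sizes are illustrative).
    #include <iostream>
    #include <vector>

    constexpr int kMaxCumulative = 1000;  // stand-in for the cumulative limit
    constexpr int kMaxAbsolute = 1500;    // stand-in for the absolute limit

    struct Candidate {
      int bytecode_size;
      bool is_small;  // small candidates are inlined eagerly but still counted
    };

    int main() {
      std::vector<Candidate> candidates = {{400, false}, {50, true}, {700, false}};
      int total_inlined = 0;
      for (const Candidate& c : candidates) {
        // Hard stop: never exceed the absolute budget, even for small functions.
        if (total_inlined >= kMaxAbsolute) break;
        // Non-small candidates must also respect the cumulative budget.
        if (!c.is_small && total_inlined + c.bytecode_size > kMaxCumulative) continue;
        total_inlined += c.bytecode_size;  // every inlined body counts
        std::cout << "inlined candidate of size " << c.bytecode_size << "\n";
      }
      std::cout << "total inlined: " << total_inlined << "\n";
    }
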
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index 99ad258c31..b143e9b67f 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -97,7 +97,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
JSHeapBroker* const broker_;
- int cumulative_count_ = 0;
+ int total_inlined_bytecode_size_ = 0;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index e43e710da7..91cbea2346 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -7,11 +7,13 @@
#include "src/ast/ast.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -466,14 +468,13 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
AllowHandleAllocation allow_handle_alloc;
AllowHeapAllocation allow_heap_alloc;
AllowCodeDependencyChange allow_code_dep_change;
- Handle<Context> native_context =
- handle(info_->native_context(), isolate());
-
- BuildGraphFromBytecode(broker(), zone(), bytecode_array.object(),
- shared_info.value().object(),
- feedback_vector.object(), BailoutId::None(),
- jsgraph(), call.frequency(), source_positions_,
- native_context, inlining_id, flags);
+ CallFrequency frequency = call.frequency();
+ Handle<NativeContext> native_context(info_->native_context(), isolate());
+ BuildGraphFromBytecode(
+ broker(), zone(), bytecode_array.object(),
+ shared_info.value().object(), feedback_vector.object(),
+ BailoutId::None(), jsgraph(), frequency, source_positions_,
+ native_context, inlining_id, flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 94a9e71b2e..f50f7b591d 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -59,7 +59,8 @@ class JSInliner final : public AdvancedReducer {
SourcePositionTable* const source_positions_;
base::Optional<SharedFunctionInfoRef> DetermineCallTarget(Node* node);
- FeedbackVectorRef DetermineCallContext(Node* node, Node*& context_out);
+ FeedbackVectorRef DetermineCallContext(
+ Node* node, Node*& context_out); // NOLINT(runtime/references)
Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
int parameter_count, BailoutId bailout_id,
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 312ab38f51..7d742a5f32 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -33,12 +33,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-// This is needed for gc_mole which will compile this file without the full set
-// of GN defined macros.
-#ifndef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-#define V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP 64
-#endif
-
namespace {
bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
@@ -513,8 +507,8 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
Node* receiver, Node* effect, Handle<HeapObject> prototype) {
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect,
- &receiver_maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &receiver_maps);
if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
// Try to determine either that all of the {receiver_maps} have the given
@@ -686,6 +680,7 @@ Reduction JSNativeContextSpecialization::ReduceJSPromiseResolve(Node* node) {
// ES section #sec-promise-resolve-functions
Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSResolvePromise, node->opcode());
Node* promise = NodeProperties::GetValueInput(node, 0);
Node* resolution = NodeProperties::GetValueInput(node, 1);
@@ -702,9 +697,17 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
ZoneVector<PropertyAccessInfo> access_infos(graph()->zone());
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
- access_info_factory.ComputePropertyAccessInfos(
- resolution_maps, factory()->then_string(), AccessMode::kLoad,
- &access_infos);
+ if (!FLAG_concurrent_inlining) {
+ access_info_factory.ComputePropertyAccessInfos(
+ resolution_maps, factory()->then_string(), AccessMode::kLoad,
+ &access_infos);
+ } else {
+ // Obtain pre-computed access infos from the broker.
+ for (auto map : resolution_maps) {
+ MapRef map_ref(broker(), map);
+ access_infos.push_back(broker()->GetAccessInfoForLoadingThen(map_ref));
+ }
+ }
PropertyAccessInfo access_info =
access_info_factory.FinalizePropertyAccessInfosAsOne(access_infos,
AccessMode::kLoad);
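
Under FLAG_concurrent_inlining, the hunk above no longer computes property access infos from the heap; it reads results the broker precomputed during serialization (GetAccessInfoForLoadingThen). A rough standalone sketch of that precompute-then-consume split, with illustrative types that are not the real broker API:

    // Sketch: precompute on the main thread, consume from the concurrent phase.
    #include <iostream>
    #include <map>
    #include <string>

    struct AccessInfo {
      std::string property;
      bool valid = false;
    };

    class Broker {
     public:
      // Main-thread phase: allowed to touch the heap and record results.
      void Precompute(int map_id, const std::string& property) {
        cache_[map_id] = AccessInfo{property, true};
      }
      // Concurrent phase: only reads previously recorded results; a miss
      // yields an invalid AccessInfo.
      AccessInfo Lookup(int map_id) const {
        auto it = cache_.find(map_id);
        return it == cache_.end() ? AccessInfo{} : it->second;
      }

     private:
      std::map<int, AccessInfo> cache_;
    };

    AccessInfo ComputeFromHeap(int map_id) {
      // Stand-in for a heap walk that is only legal on the main thread.
      return AccessInfo{"then", true};
    }

    int main() {
      bool concurrent = true;
      Broker broker;
      broker.Precompute(7, "then");  // serialization step on the main thread

      AccessInfo info = concurrent ? broker.Lookup(7) : ComputeFromHeap(7);
      std::cout << (info.valid ? info.property : "<invalid>") << "\n";
    }
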
@@ -975,9 +978,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
LoadGlobalParameters const& p = LoadGlobalParametersOf(node->op());
if (!p.feedback().IsValid()) return NoChange();
FeedbackSource source(p.feedback());
@@ -1007,9 +1009,8 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
- DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
-
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
Node* value = NodeProperties::GetValueInput(node, 0);
StoreGlobalParameters const& p = StoreGlobalParametersOf(node->op());
@@ -1298,7 +1299,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
- Node* node, Node* value, FeedbackNexus const& nexus, NameRef const& name,
+ Node* node, Node* value, FeedbackSource const& source, NameRef const& name,
AccessMode access_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
@@ -1312,11 +1313,11 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, nullptr, name, value,
- nexus, access_mode);
+ return ReducePropertyAccess(node, nullptr, name, value, source, access_mode);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const receiver = NodeProperties::GetValueInput(node, 0);
@@ -1355,56 +1356,47 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
}
}
- // Extract receiver maps from the load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(), nexus, name,
+ return ReduceNamedAccessFromNexus(node, jsgraph()->Dead(),
+ FeedbackSource(p.feedback()), name,
AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(
- node, value, nexus, NameRef(broker(), p.name()), AccessMode::kStore);
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
+ NameRef(broker(), p.name()),
+ AccessMode::kStore);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
Node* const value = NodeProperties::GetValueInput(node, 1);
- // Extract receiver maps from the IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Try to lower the creation of a named property based on the {receiver_maps}.
- return ReduceNamedAccessFromNexus(node, value, nexus,
+ return ReduceNamedAccessFromNexus(node, value, FeedbackSource(p.feedback()),
NameRef(broker(), p.name()),
AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
- Node* node, Node* index, Node* value, AccessMode access_mode,
- KeyedAccessLoadMode load_mode) {
+ Node* node, Node* index, Node* value, KeyedAccessMode const& keyed_mode) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Strings are immutable in JavaScript.
- if (access_mode == AccessMode::kStore) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kStore) return NoChange();
// `in` cannot be used on strings.
- if (access_mode == AccessMode::kHas) return NoChange();
+ if (keyed_mode.access_mode() == AccessMode::kHas) return NoChange();
// Ensure that the {receiver} is actually a String.
receiver = effect = graph()->NewNode(
@@ -1416,7 +1408,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccessOnString(
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
value = BuildIndexedStringLoad(receiver, index, length, &effect, &control,
- load_mode);
+ keyed_mode.load_mode());
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1437,24 +1429,31 @@ base::Optional<JSTypedArrayRef> GetTypedArrayConstant(JSHeapBroker* broker,
Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessFeedback const& processed) {
DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
-
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
node->opcode() == IrOpcode::kJSHasProperty);
+
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state =
NodeProperties::FindFrameStateBefore(node, jsgraph()->Dead());
+ AccessMode access_mode = processed.keyed_mode.access_mode();
+ if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
+ receiver->opcode() == IrOpcode::kHeapConstant) {
+ Reduction reduction = ReduceKeyedLoadFromHeapConstant(
+ node, index, access_mode, processed.keyed_mode.load_mode());
+ if (reduction.Changed()) return reduction;
+ }
+
if (HasOnlyStringMaps(broker(), processed.receiver_maps)) {
DCHECK(processed.transitions.empty());
- return ReduceElementAccessOnString(node, index, value, access_mode,
- load_mode);
+ return ReduceElementAccessOnString(node, index, value,
+ processed.keyed_mode);
}
// Compute element access infos for the receiver maps.
@@ -1485,7 +1484,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// then we need to check that all prototypes have stable maps with
// fast elements (and we need to guard against changes to that below).
if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
- IsGrowStoreMode(store_mode)) &&
+ IsGrowStoreMode(processed.keyed_mode.store_mode())) &&
!receiver_map.HasOnlyStablePrototypesWithFastElements(
&prototype_maps)) {
return NoChange();
@@ -1558,7 +1557,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
// Access the actual element.
ValueEffectControl continuation =
BuildElementAccess(receiver, index, value, effect, control, access_info,
- access_mode, load_mode, store_mode);
+ processed.keyed_mode);
value = continuation.value();
effect = continuation.effect();
control = continuation.control();
@@ -1591,7 +1590,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
? ElementsTransition::kFastTransition
: ElementsTransition::kSlowTransition,
transition_source.object(), transition_target.object())),
- receiver, effect, control);
+ receiver, this_effect, this_control);
}
// Perform map check(s) on {receiver}.
@@ -1623,9 +1622,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Access the actual element.
- ValueEffectControl continuation = BuildElementAccess(
- this_receiver, this_index, this_value, this_effect, this_control,
- access_info, access_mode, load_mode, store_mode);
+ ValueEffectControl continuation =
+ BuildElementAccess(this_receiver, this_index, this_value, this_effect,
+ this_control, access_info, processed.keyed_mode);
values.push_back(continuation.value());
effects.push_back(continuation.effect());
controls.push_back(continuation.control());
@@ -1659,7 +1658,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
- Node* node, Node* key, FeedbackNexus const& nexus, AccessMode access_mode,
+ Node* node, Node* key, AccessMode access_mode,
KeyedAccessLoadMode load_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSHasProperty);
@@ -1715,54 +1714,24 @@ Reduction JSNativeContextSpecialization::ReduceKeyedLoadFromHeapConstant(
// accesses using the known length, which doesn't change.
if (receiver_ref.IsString()) {
DCHECK_NE(access_mode, AccessMode::kHas);
- // We can only assume that the {index} is a valid array index if the
- // IC is in element access mode and not MEGAMORPHIC, otherwise there's
- // no guard for the bounds check below.
- if (nexus.ic_state() != MEGAMORPHIC && nexus.GetKeyType() == ELEMENT) {
- // Ensure that {key} is less than {receiver} length.
- Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
-
- // Load the single character string from {receiver} or yield
- // undefined if the {key} is out of bounds (depending on the
- // {load_mode}).
- Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
- &control, load_mode);
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
+ // Ensure that {key} is less than {receiver} length.
+ Node* length = jsgraph()->Constant(receiver_ref.AsString().length());
- return NoChange();
-}
-
-Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
- Node* node, Node* key, Node* value, FeedbackNexus const& nexus,
- AccessMode access_mode, KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
- node->opcode() == IrOpcode::kJSStoreProperty ||
- node->opcode() == IrOpcode::kJSStoreInArrayLiteral ||
- node->opcode() == IrOpcode::kJSHasProperty);
-
- Node* receiver = NodeProperties::GetValueInput(node, 0);
-
- if ((access_mode == AccessMode::kLoad || access_mode == AccessMode::kHas) &&
- receiver->opcode() == IrOpcode::kHeapConstant) {
- Reduction reduction = ReduceKeyedLoadFromHeapConstant(
- node, key, nexus, access_mode, load_mode);
- if (reduction.Changed()) return reduction;
+ // Load the single character string from {receiver} or yield
+ // undefined if the {key} is out of bounds (depending on the
+ // {load_mode}).
+ Node* value = BuildIndexedStringLoad(receiver, key, length, &effect,
+ &control, load_mode);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
- return ReducePropertyAccessUsingProcessedFeedback(node, key, base::nullopt,
- value, nexus, access_mode,
- load_mode, store_mode);
+ return NoChange();
}
-Reduction
-JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
+Reduction JSNativeContextSpecialization::ReducePropertyAccess(
Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ FeedbackSource const& source, AccessMode access_mode) {
DCHECK_EQ(key == nullptr, static_name.has_value());
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty ||
@@ -1777,11 +1746,12 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
ProcessedFeedback const* processed = nullptr;
if (FLAG_concurrent_inlining) {
- processed = broker()->GetFeedback(FeedbackSource(nexus));
+ processed = broker()->GetFeedback(source);
// TODO(neis): Infer maps from the graph and consolidate with feedback/hints
// and filter impossible candidates based on inferred root map.
} else {
// TODO(neis): Try to unify this with the similar code in the serializer.
+ FeedbackNexus nexus(source.vector, source.slot);
if (nexus.ic_state() == UNINITIALIZED) {
processed = new (zone()) InsufficientFeedback();
} else {
@@ -1801,8 +1771,8 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
processed = new (zone()) NamedAccessFeedback(*name, access_infos);
} else if (nexus.GetKeyType() == ELEMENT &&
MEGAMORPHIC != nexus.ic_state()) {
- processed =
- broker()->ProcessFeedbackMapsForElementAccess(receiver_maps);
+ processed = broker()->ProcessFeedbackMapsForElementAccess(
+ receiver_maps, KeyedAccessMode::FromNexus(nexus));
}
}
}
@@ -1818,9 +1788,10 @@ JSNativeContextSpecialization::ReducePropertyAccessUsingProcessedFeedback(
return ReduceNamedAccess(node, value, *processed->AsNamedAccess(),
access_mode, key);
case ProcessedFeedback::kElementAccess:
+ CHECK_EQ(processed->AsElementAccess()->keyed_mode.access_mode(),
+ access_mode);
return ReduceElementAccess(node, key, value,
- *processed->AsElementAccess(), access_mode,
- load_mode, store_mode);
+ *processed->AsElementAccess());
case ProcessedFeedback::kGlobalAccess:
UNREACHABLE();
}
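
ReducePropertyAccess, as shown in the switch above, dispatches on the kind of processed feedback it is handed (insufficient, named access, element access, global access). A simplified standalone sketch of that dispatch, using a cut-down hierarchy that omits the global-access case and is not V8's actual classes:

    // Sketch: tag-based dispatch over precomputed feedback kinds.
    #include <iostream>

    class ProcessedFeedback {
     public:
      enum Kind { kInsufficient, kNamedAccess, kElementAccess };
      explicit ProcessedFeedback(Kind kind) : kind_(kind) {}
      virtual ~ProcessedFeedback() = default;
      Kind kind() const { return kind_; }

     private:
      Kind kind_;
    };

    class NamedAccessFeedback final : public ProcessedFeedback {
     public:
      NamedAccessFeedback() : ProcessedFeedback(kNamedAccess) {}
    };

    class ElementAccessFeedback final : public ProcessedFeedback {
     public:
      ElementAccessFeedback() : ProcessedFeedback(kElementAccess) {}
    };

    const char* Reduce(const ProcessedFeedback& feedback) {
      switch (feedback.kind()) {
        case ProcessedFeedback::kInsufficient:
          return "deoptimize softly";    // not enough information collected
        case ProcessedFeedback::kNamedAccess:
          return "lower named access";   // use the recorded property infos
        case ProcessedFeedback::kElementAccess:
          return "lower element access"; // use the recorded receiver maps
      }
      return "no change";
    }

    int main() {
      NamedAccessFeedback named;
      std::cout << Reduce(named) << "\n";
    }
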
@@ -1846,21 +1817,15 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
}
Reduction JSNativeContextSpecialization::ReduceJSHasProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* key = NodeProperties::GetValueInput(node, 1);
Node* value = jsgraph()->Dead();
- // Extract receiver maps from the has property IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kHas, load_mode,
- STANDARD_STORE);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kHas);
}
Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
@@ -1970,6 +1935,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadPropertyWithEnumeratedKey(
}
Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_acess(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* name = NodeProperties::GetValueInput(node, 1);
@@ -1979,62 +1945,49 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
if (reduction.Changed()) return reduction;
}
- // Extract receiver maps from the keyed load IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access load mode from the keyed load IC.
- KeyedAccessLoadMode load_mode = nexus.GetKeyedAccessLoadMode();
-
- // Try to lower the keyed access based on the {nexus}.
Node* value = jsgraph()->Dead();
- return ReduceKeyedAccess(node, name, value, nexus, AccessMode::kLoad,
- load_mode, STANDARD_STORE);
+ return ReducePropertyAccess(node, name, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kLoad);
}
Reduction JSNativeContextSpecialization::ReduceJSStoreProperty(Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
Node* const key = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- // Try to lower the keyed access based on the {nexus}.
- return ReduceKeyedAccess(node, key, value, nexus, AccessMode::kStore,
- STANDARD_LOAD, store_mode);
+ return ReducePropertyAccess(node, key, base::nullopt, value,
+ FeedbackSource(p.feedback()), AccessMode::kStore);
}
Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
Node* receiver, Node* context, Node* frame_state, Node** effect,
Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the getter function.
Node* value;
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
value = *effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
- value = InlineApiCall(receiver, holder, frame_state, nullptr, effect,
- control, shared_info, function_template_info);
+
+ value =
+ InlineApiCall(receiver, holder, frame_state, nullptr, effect, control,
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2052,26 +2005,24 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* receiver, Node* value, Node* context, Node* frame_state,
Node** effect, Node** control, ZoneVector<Node*>* if_exceptions,
PropertyAccessInfo const& access_info) {
- Node* target = jsgraph()->Constant(access_info.constant());
+ ObjectRef constant(broker(), access_info.constant());
+ Node* target = jsgraph()->Constant(constant);
FrameStateInfo const& frame_info = FrameStateInfoOf(frame_state->op());
// Introduce the call to the setter function.
- ObjectRef constant(broker(), access_info.constant());
if (constant.IsJSFunction()) {
*effect = *control = graph()->NewNode(
jsgraph()->javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state, *effect, *control);
} else {
- auto function_template_info = constant.AsFunctionTemplateInfo();
- function_template_info.Serialize();
- Node* holder =
- access_info.holder().is_null()
- ? receiver
- : jsgraph()->Constant(access_info.holder().ToHandleChecked());
+ Node* holder = access_info.holder().is_null()
+ ? receiver
+ : jsgraph()->Constant(ObjectRef(
+ broker(), access_info.holder().ToHandleChecked()));
SharedFunctionInfoRef shared_info(
broker(), frame_info.shared_info().ToHandleChecked());
InlineApiCall(receiver, holder, frame_state, value, effect, control,
- shared_info, function_template_info);
+ shared_info, constant.AsFunctionTemplateInfo());
}
// Remember to rewire the IfException edge if this is inside a try-block.
if (if_exceptions != nullptr) {
@@ -2088,8 +2039,16 @@ Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, SharedFunctionInfoRef const& shared_info,
FunctionTemplateInfoRef const& function_template_info) {
- auto call_handler_info =
- function_template_info.call_code().AsCallHandlerInfo();
+ if (!function_template_info.has_call_code()) {
+ return nullptr;
+ }
+
+ if (!function_template_info.call_code().has_value()) {
+ TRACE_BROKER_MISSING(broker(), "call code for function template info "
+ << function_template_info);
+ return nullptr;
+ }
+ CallHandlerInfoRef call_handler_info = *function_template_info.call_code();
// Only setters have a value.
int const argc = value == nullptr ? 0 : 1;
@@ -2151,7 +2110,8 @@ JSNativeContextSpecialization::BuildPropertyLoad(
value = InlinePropertyGetterCall(receiver, context, frame_state, &effect,
&control, if_exceptions, access_info);
} else if (access_info.IsModuleExport()) {
- Node* cell = jsgraph()->Constant(access_info.export_cell());
+ Node* cell = jsgraph()->Constant(
+ ObjectRef(broker(), access_info.constant()).AsCell());
value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
@@ -2382,7 +2342,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
// Check if we need to grow the properties backing store
// with this transitioning store.
MapRef transition_map_ref(broker(), transition_map);
- transition_map_ref.SerializeBackPointer();
MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -2404,7 +2363,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
common()->BeginRegion(RegionObservability::kObservable), effect);
effect = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForMap()), receiver,
- jsgraph()->Constant(transition_map), effect, control);
+ jsgraph()->Constant(transition_map_ref), effect, control);
effect = graph()->NewNode(simplified()->StoreField(field_access), storage,
value, effect, control);
effect = graph()->NewNode(common()->FinishRegion(),
@@ -2495,21 +2454,16 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Reduction JSNativeContextSpecialization::ReduceJSStoreInArrayLiteral(
Node* node) {
+ DisallowHeapAccessIf no_heap_access(FLAG_concurrent_inlining);
DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
FeedbackParameter const& p = FeedbackParameterOf(node->op());
Node* const index = NodeProperties::GetValueInput(node, 1);
Node* const value = NodeProperties::GetValueInput(node, 2);
- // Extract receiver maps from the keyed store IC using the FeedbackNexus.
if (!p.feedback().IsValid()) return NoChange();
- FeedbackNexus nexus(p.feedback().vector(), p.feedback().slot());
-
- // Extract the keyed access store mode from the keyed store IC.
- KeyedAccessStoreMode store_mode = nexus.GetKeyedAccessStoreMode();
-
- return ReduceKeyedAccess(node, index, value, nexus,
- AccessMode::kStoreInLiteral, STANDARD_LOAD,
- store_mode);
+ return ReducePropertyAccess(node, index, base::nullopt, value,
+ FeedbackSource(p.feedback()),
+ AccessMode::kStoreInLiteral);
}
Reduction JSNativeContextSpecialization::ReduceJSToObject(Node* node) {
@@ -2546,8 +2500,7 @@ ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode) {
+ ElementAccessInfo const& access_info, KeyedAccessMode const& keyed_mode) {
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
@@ -2583,7 +2536,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// for Chrome. Node and Electron both set this limit to 0. Setting
// the base to Smi zero here allows the EffectControlLinearizer to
// optimize away the tricky part of the access later.
- if (V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP == 0) {
+ if (JSTypedArray::kMaxSizeInHeap == 0) {
base_pointer = jsgraph()->ZeroConstant();
} else {
base_pointer = effect =
@@ -2629,8 +2582,10 @@ JSNativeContextSpecialization::BuildElementAccess(
buffer_or_receiver = buffer;
}
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS ||
- store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if ((keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) ||
+ (keyed_mode.IsStore() &&
+ keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS)) {
// Only check that the {index} is in SignedSmall range. We do the actual
// bounds check below and just skip the property access if it's out of
// bounds for the {receiver}.
@@ -2651,10 +2606,10 @@ JSNativeContextSpecialization::BuildElementAccess(
// Access the actual element.
ExternalArrayType external_array_type =
GetArrayTypeFromElementsKind(elements_kind);
- switch (access_mode) {
+ switch (keyed_mode.access_mode()) {
case AccessMode::kLoad: {
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(
@@ -2716,7 +2671,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can skip the out-of-bounds store.
- if (store_mode == STORE_IGNORE_OUT_OF_BOUNDS) {
+ if (keyed_mode.store_mode() == STORE_IGNORE_OUT_OF_BOUNDS) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -2766,9 +2721,9 @@ JSNativeContextSpecialization::BuildElementAccess(
// Don't try to store to a copy-on-write backing store (unless supported by
// the store mode).
- if (access_mode == AccessMode::kStore &&
+ if (keyed_mode.access_mode() == AccessMode::kStore &&
IsSmiOrObjectElementsKind(elements_kind) &&
- !IsCOWHandlingStoreMode(store_mode)) {
+ !IsCOWHandlingStoreMode(keyed_mode.store_mode())) {
effect = graph()->NewNode(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
@@ -2791,11 +2746,10 @@ JSNativeContextSpecialization::BuildElementAccess(
elements, effect, control);
// Check if we might need to grow the {elements} backing store.
- if (IsGrowStoreMode(store_mode)) {
+ if (keyed_mode.IsStore() && IsGrowStoreMode(keyed_mode.store_mode())) {
// For growing stores we validate the {index} below.
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
- } else if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ } else if (keyed_mode.IsLoad() &&
+ keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
@@ -2826,7 +2780,7 @@ JSNativeContextSpecialization::BuildElementAccess(
kFullWriteBarrier, LoadSensitivity::kCritical};
// Access the actual element.
- if (access_mode == AccessMode::kLoad) {
+ if (keyed_mode.access_mode() == AccessMode::kLoad) {
// Compute the real element access type, which includes the hole in case
// of holey backing stores.
if (IsHoleyElementsKind(elements_kind)) {
@@ -2839,7 +2793,7 @@ JSNativeContextSpecialization::BuildElementAccess(
}
// Check if we can return undefined for out-of-bounds loads.
- if (load_mode == LOAD_IGNORE_OUT_OF_BOUNDS &&
+ if (keyed_mode.load_mode() == LOAD_IGNORE_OUT_OF_BOUNDS &&
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
@@ -2923,7 +2877,7 @@ JSNativeContextSpecialization::BuildElementAccess(
effect, control);
}
}
- } else if (access_mode == AccessMode::kHas) {
+ } else if (keyed_mode.access_mode() == AccessMode::kHas) {
// For packed arrays with NoElementsProtector valid, a bounds check
// is equivalent to HasProperty.
value = effect = graph()->NewNode(simplified()->SpeculativeNumberLessThan(
@@ -2996,8 +2950,9 @@ JSNativeContextSpecialization::BuildElementAccess(
vtrue, vfalse, control);
}
} else {
- DCHECK(access_mode == AccessMode::kStore ||
- access_mode == AccessMode::kStoreInLiteral);
+ DCHECK(keyed_mode.access_mode() == AccessMode::kStore ||
+ keyed_mode.access_mode() == AccessMode::kStoreInLiteral);
+
if (IsSmiElementsKind(elements_kind)) {
value = effect = graph()->NewNode(
simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
@@ -3011,11 +2966,11 @@ JSNativeContextSpecialization::BuildElementAccess(
// Ensure that copy-on-write backing store is writable.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
- } else if (IsGrowStoreMode(store_mode)) {
+ } else if (IsGrowStoreMode(keyed_mode.store_mode())) {
// Determine the length of the {elements} backing store.
Node* elements_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
@@ -3053,7 +3008,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// If we didn't grow {elements}, it might still be COW, in which case we
// copy it now.
if (IsSmiOrObjectElementsKind(elements_kind) &&
- store_mode == STORE_AND_GROW_HANDLE_COW) {
+ keyed_mode.store_mode() == STORE_AND_GROW_HANDLE_COW) {
elements = effect =
graph()->NewNode(simplified()->EnsureWritableFastElements(),
receiver, elements, effect, control);
@@ -3295,7 +3250,8 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(broker(), receiver, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker(), receiver, effect,
+ &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
receiver_maps->push_back(maps[i]);
@@ -3357,8 +3313,6 @@ SimplifiedOperatorBuilder* JSNativeContextSpecialization::simplified() const {
return jsgraph()->simplified();
}
-#undef V8_TYPED_ARRAY_MAX_SIZE_IN_HEAP
-
} // namespace compiler
} // namespace internal
} // namespace v8
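The hunks above replace the separate KeyedAccessLoadMode and KeyedAccessStoreMode parameters with a single KeyedAccessMode value carried alongside the AccessMode. The following is a minimal standalone sketch of that shape, not V8's actual class; the enum values and factory names are assumptions chosen only to mirror the accessors the diff calls (IsLoad, IsStore, load_mode, store_mode).

#include <cassert>

enum class AccessMode { kLoad, kStore, kHas, kStoreInLiteral };
enum class LoadMode { kStandard, kIgnoreOutOfBounds };
enum class StoreMode { kStandard, kGrowAndHandleCOW };

// Bundles the access kind with whichever mode applies to it, so a helper like
// BuildElementAccess takes one argument instead of three loosely coupled ones.
class KeyedMode {
 public:
  static KeyedMode Load(LoadMode m) {
    return KeyedMode(AccessMode::kLoad, m, StoreMode::kStandard);
  }
  static KeyedMode Store(AccessMode a, StoreMode m) {
    assert(a == AccessMode::kStore || a == AccessMode::kStoreInLiteral);
    return KeyedMode(a, LoadMode::kStandard, m);
  }
  AccessMode access_mode() const { return access_; }
  bool IsLoad() const { return access_ == AccessMode::kLoad; }
  bool IsStore() const {
    return access_ == AccessMode::kStore ||
           access_ == AccessMode::kStoreInLiteral;
  }
  // Querying the mode that does not apply to this access is a programming
  // error, hence the asserts.
  LoadMode load_mode() const { assert(IsLoad()); return load_; }
  StoreMode store_mode() const { assert(IsStore()); return store_; }

 private:
  KeyedMode(AccessMode a, LoadMode l, StoreMode s)
      : access_(a), load_(l), store_(s) {}
  AccessMode access_;
  LoadMode load_;
  StoreMode store_;
};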
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 7de2639966..8510c76bfc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -7,6 +7,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/objects/map.h"
@@ -93,24 +94,15 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
Reduction ReduceJSToObject(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- ElementAccessFeedback const& processed,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ ElementAccessFeedback const& processed);
// In the case of non-keyed (named) accesses, pass the name as {static_name}
// and use {nullptr} for {key} (load/store modes are irrelevant).
- Reduction ReducePropertyAccessUsingProcessedFeedback(
- Node* node, Node* key, base::Optional<NameRef> static_name, Node* value,
- FeedbackNexus const& nexus, AccessMode access_mode,
- KeyedAccessLoadMode load_mode = STANDARD_LOAD,
- KeyedAccessStoreMode store_mode = STANDARD_STORE);
- Reduction ReduceKeyedAccess(Node* node, Node* key, Node* value,
- FeedbackNexus const& nexus,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode,
- KeyedAccessStoreMode store_mode);
+ Reduction ReducePropertyAccess(Node* node, Node* key,
+ base::Optional<NameRef> static_name,
+ Node* value, FeedbackSource const& source,
+ AccessMode access_mode);
Reduction ReduceNamedAccessFromNexus(Node* node, Node* value,
- FeedbackNexus const& nexus,
+ FeedbackSource const& source,
NameRef const& name,
AccessMode access_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
@@ -123,12 +115,10 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
NameRef const& name, AccessMode access_mode,
Node* key, PropertyCellRef const& property_cell);
Reduction ReduceKeyedLoadFromHeapConstant(Node* node, Node* key,
- FeedbackNexus const& nexus,
AccessMode access_mode,
KeyedAccessLoadMode load_mode);
Reduction ReduceElementAccessOnString(Node* node, Node* index, Node* value,
- AccessMode access_mode,
- KeyedAccessLoadMode load_mode);
+ KeyedAccessMode const& keyed_mode);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
Reduction ReduceJSToString(Node* node);
@@ -197,10 +187,11 @@ class V8_EXPORT_PRIVATE JSNativeContextSpecialization final
FunctionTemplateInfoRef const& function_template_info);
// Construct the appropriate subgraph for element access.
- ValueEffectControl BuildElementAccess(
- Node* receiver, Node* index, Node* value, Node* effect, Node* control,
- ElementAccessInfo const& access_info, AccessMode access_mode,
- KeyedAccessLoadMode load_mode, KeyedAccessStoreMode store_mode);
+ ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
+ Node* value, Node* effect,
+ Node* control,
+ ElementAccessInfo const& access_info,
+ KeyedAccessMode const& keyed_mode);
// Construct appropriate subgraph to load from a String.
Node* BuildIndexedStringLoad(Node* receiver, Node* index, Node* length,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index a779790b8d..e0f97922b2 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -17,7 +17,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-std::ostream& operator<<(std::ostream& os, CallFrequency f) {
+std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
if (f.IsUnknown()) return os << "unknown";
return os << f.value();
}
@@ -28,7 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
return os << p.arity() << ", " << p.start_index();
@@ -843,7 +842,8 @@ const Operator* JSOperatorBuilder::Call(size_t arity,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
+const Operator* JSOperatorBuilder::CallWithArrayLike(
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSCallWithArrayLike, Operator::kNoProperties, // opcode
"JSCallWithArrayLike", // name
@@ -899,8 +899,10 @@ const Operator* JSOperatorBuilder::ConstructForwardVarargs(
parameters); // parameter
}
+// Note: frequency is taken by reference to work around a GCC bug
+// on AIX (v8:8193).
const Operator* JSOperatorBuilder::Construct(uint32_t arity,
- CallFrequency frequency,
+ CallFrequency const& frequency,
VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
@@ -911,7 +913,7 @@ const Operator* JSOperatorBuilder::Construct(uint32_t arity,
}
const Operator* JSOperatorBuilder::ConstructWithArrayLike(
- CallFrequency frequency) {
+ CallFrequency const& frequency) {
return new (zone()) Operator1<CallFrequency>( // --
IrOpcode::kJSConstructWithArrayLike, // opcode
Operator::kNoProperties, // properties
@@ -921,7 +923,8 @@ const Operator* JSOperatorBuilder::ConstructWithArrayLike(
}
const Operator* JSOperatorBuilder::ConstructWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency const& frequency,
+ VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
IrOpcode::kJSConstructWithSpread, Operator::kNoProperties, // opcode
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 0f315b1cb5..e7d9acb152 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -48,7 +48,7 @@ class CallFrequency final {
}
bool operator!=(CallFrequency const& that) const { return !(*this == that); }
- friend size_t hash_value(CallFrequency f) {
+ friend size_t hash_value(CallFrequency const& f) {
return bit_cast<uint32_t>(f.value_);
}
@@ -58,7 +58,7 @@ class CallFrequency final {
float value_;
};
-std::ostream& operator<<(std::ostream&, CallFrequency);
+std::ostream& operator<<(std::ostream&, CallFrequency const&);
CallFrequency CallFrequencyOf(Operator const* op) V8_WARN_UNUSED_RESULT;
@@ -101,7 +101,7 @@ ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
// used as a parameter by JSConstruct and JSConstructWithSpread operators.
class ConstructParameters final {
public:
- ConstructParameters(uint32_t arity, CallFrequency frequency,
+ ConstructParameters(uint32_t arity, CallFrequency const& frequency,
VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
@@ -757,7 +757,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation);
- const Operator* CallWithArrayLike(CallFrequency frequency);
+ const Operator* CallWithArrayLike(CallFrequency const& frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
@@ -768,11 +768,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
const Operator* Construct(uint32_t arity,
- CallFrequency frequency = CallFrequency(),
+ CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
- const Operator* ConstructWithArrayLike(CallFrequency frequency);
+ const Operator* ConstructWithArrayLike(CallFrequency const& frequency);
const Operator* ConstructWithSpread(
- uint32_t arity, CallFrequency frequency = CallFrequency(),
+ uint32_t arity, CallFrequency const& frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair());
const Operator* LoadProperty(VectorSlotPair const& feedback);
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 9d882e8238..f3696bcc48 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -44,6 +44,25 @@ bool BinaryOperationHintToNumberOperationHint(
return false;
}
+bool BinaryOperationHintToBigIntOperationHint(
+ BinaryOperationHint binop_hint, BigIntOperationHint* bigint_hint) {
+ switch (binop_hint) {
+ case BinaryOperationHint::kSignedSmall:
+ case BinaryOperationHint::kSignedSmallInputs:
+ case BinaryOperationHint::kSigned32:
+ case BinaryOperationHint::kNumber:
+ case BinaryOperationHint::kNumberOrOddball:
+ case BinaryOperationHint::kAny:
+ case BinaryOperationHint::kNone:
+ case BinaryOperationHint::kString:
+ return false;
+ case BinaryOperationHint::kBigInt:
+ *bigint_hint = BigIntOperationHint::kBigInt;
+ return true;
+ }
+ UNREACHABLE();
+}
+
} // namespace
class JSSpeculativeBinopBuilder final {
@@ -74,6 +93,11 @@ class JSSpeculativeBinopBuilder final {
hint);
}
+ bool GetBinaryBigIntOperationHint(BigIntOperationHint* hint) {
+ return BinaryOperationHintToBigIntOperationHint(GetBinaryOperationHint(),
+ hint);
+ }
+
bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
switch (GetCompareOperationHint()) {
case CompareOperationHint::kSignedSmall:
@@ -138,6 +162,16 @@ class JSSpeculativeBinopBuilder final {
UNREACHABLE();
}
+ const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ switch (op_->opcode()) {
+ case IrOpcode::kJSAdd:
+ return simplified()->SpeculativeBigIntAdd(hint);
+ default:
+ break;
+ }
+ UNREACHABLE();
+ }
+
const Operator* SpeculativeCompareOp(NumberOperationHint hint) {
switch (op_->opcode()) {
case IrOpcode::kJSEqual:
@@ -179,6 +213,16 @@ class JSSpeculativeBinopBuilder final {
return nullptr;
}
+ Node* TryBuildBigIntBinop() {
+ BigIntOperationHint hint;
+ if (GetBinaryBigIntOperationHint(&hint)) {
+ const Operator* op = SpeculativeBigIntOp(hint);
+ Node* node = BuildSpeculativeOperation(op);
+ return node;
+ }
+ return nullptr;
+ }
+
Node* TryBuildNumberCompare() {
NumberOperationHint hint;
if (GetCompareNumberOperationHint(&hint)) {
@@ -264,6 +308,15 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
operand, jsgraph()->SmiConstant(-1), effect,
control, slot);
node = b.TryBuildNumberBinop();
+ if (!node) {
+ FeedbackNexus nexus(feedback_vector(), slot);
+ if (nexus.GetBinaryOperationFeedback() ==
+ BinaryOperationHint::kBigInt) {
+ const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
+ }
break;
}
default:
@@ -345,6 +398,11 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
if (Node* node = b.TryBuildNumberBinop()) {
return LoweringResult::SideEffectFree(node, node, control);
}
+ if (op->opcode() == IrOpcode::kJSAdd) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
+ }
break;
}
case IrOpcode::kJSExponentiate: {
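The BigInt additions above follow the usual feedback-driven shape: the recorded binary-operation hint is consulted, and only a pure kBigInt hint justifies emitting a speculative BigInt operator; every other hint keeps the generic JS operator. A standalone sketch of that decision, with hypothetical enum values (only kBigInt corresponds to the feedback name used in the hunk):

#include <optional>

enum class BinaryOperationHint { kSignedSmall, kNumber, kString, kBigInt, kAny };
enum class BigIntOperationHint { kBigInt };

// Only pure BigInt feedback justifies the speculative operator; any mixed or
// unknown feedback falls back to the generic JSAdd, mirroring
// BinaryOperationHintToBigIntOperationHint above.
std::optional<BigIntOperationHint> ToBigIntHint(BinaryOperationHint hint) {
  if (hint == BinaryOperationHint::kBigInt) return BigIntOperationHint::kBigInt;
  return std::nullopt;
}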
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.h b/deps/v8/src/compiler/js-type-hint-lowering.h
index 7164a0b708..a74c019355 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.h
+++ b/deps/v8/src/compiler/js-type-hint-lowering.h
@@ -153,7 +153,8 @@ class JSTypeHintLowering {
private:
friend class JSSpeculativeBinopBuilder;
- Node* TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect, Node* control,
+ Node* TryBuildSoftDeopt(FeedbackNexus& nexus, // NOLINT(runtime/references)
+ Node* effect, Node* control,
DeoptimizeReason reason) const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index ba50b75792..3190fc9930 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -10,6 +10,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -1364,20 +1365,21 @@ Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
Type module_type = NodeProperties::GetType(module);
if (module_type.IsHeapConstant()) {
- ModuleRef module_constant = module_type.AsHeapConstant()->Ref().AsModule();
+ SourceTextModuleRef module_constant =
+ module_type.AsHeapConstant()->Ref().AsSourceTextModule();
CellRef cell_constant = module_constant.GetCell(cell_index);
return jsgraph()->Constant(cell_constant);
}
FieldAccess field_access;
int index;
- if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport) {
+ if (SourceTextModuleDescriptor::GetCellIndexKind(cell_index) ==
+ SourceTextModuleDescriptor::kExport) {
field_access = AccessBuilder::ForModuleRegularExports();
index = cell_index - 1;
} else {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(cell_index),
+ SourceTextModuleDescriptor::kImport);
field_access = AccessBuilder::ForModuleRegularImports();
index = -cell_index - 1;
}
@@ -1408,9 +1410,9 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* value = NodeProperties::GetValueInput(node, 1);
- DCHECK_EQ(
- ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node->op())),
- ModuleDescriptor::kExport);
+ DCHECK_EQ(SourceTextModuleDescriptor::GetCellIndexKind(
+ OpParameter<int32_t>(node->op())),
+ SourceTextModuleDescriptor::kExport);
Node* cell = BuildGetModuleCell(node);
if (cell->op()->EffectOutputCount() > 0) effect = cell;
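As the export/import arithmetic in BuildGetModuleCell above suggests, SourceTextModuleDescriptor cell indices appear to encode exports as positive values and imports as negative ones; that reading is inferred from the hunk, not stated in it. A small sketch of the index recovery under that assumption:

#include <cstdlib>

enum class CellIndexKind { kExport, kImport, kInvalid };

CellIndexKind GetCellIndexKind(int cell_index) {
  if (cell_index > 0) return CellIndexKind::kExport;
  if (cell_index < 0) return CellIndexKind::kImport;
  return CellIndexKind::kInvalid;
}

// Matches the arithmetic in the hunk: exports live at cell_index - 1 in the
// regular-exports array, imports at -cell_index - 1 in the regular-imports
// array.
int ToArrayIndex(int cell_index) { return std::abs(cell_index) - 1; }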
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 8bb47b43e9..1d88a27a5f 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -137,13 +137,19 @@ bool CallDescriptor::CanTailCall(const Node* node) const {
return HasSameReturnLocationsAs(CallDescriptorOf(node->op()));
}
-int CallDescriptor::CalculateFixedFrameSize() const {
+// TODO(jkummerow, sigurds): Arguably frame size calculation should be
+// keyed on code/frame type, not on CallDescriptor kind. Think about a
+// good way to organize this logic.
+int CallDescriptor::CalculateFixedFrameSize(Code::Kind code_kind) const {
switch (kind_) {
case kCallJSFunction:
return PushArgumentCount()
? OptimizedBuiltinFrameConstants::kFixedSlotCount
: StandardFrameConstants::kFixedSlotCount;
case kCallAddress:
+ if (code_kind == Code::C_WASM_ENTRY) {
+ return CWasmEntryFrameConstants::kFixedSlotCount;
+ }
return CommonFrameConstants::kFixedSlotCountAboveFp +
CommonFrameConstants::kCPSlotCount;
case kCallCodeObject:
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index e4fa6f9f20..05eb0e7d11 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -325,7 +325,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool CanTailCall(const Node* call) const;
- int CalculateFixedFrameSize() const;
+ int CalculateFixedFrameSize(Code::Kind code_kind) const;
RegList AllocatableRegisters() const { return allocatable_registers_; }
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index c42bfd839a..f9998723f3 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -419,14 +419,15 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
}
void LoadElimination::AbstractState::FieldsMerge(
- AbstractFields& this_fields, AbstractFields const& that_fields,
+ AbstractFields* this_fields, AbstractFields const& that_fields,
Zone* zone) {
- for (size_t i = 0; i < this_fields.size(); ++i) {
- if (this_fields[i]) {
+ for (size_t i = 0; i < this_fields->size(); ++i) {
+ AbstractField const*& this_field = (*this_fields)[i];
+ if (this_field) {
if (that_fields[i]) {
- this_fields[i] = this_fields[i]->Merge(that_fields[i], zone);
+ this_field = this_field->Merge(that_fields[i], zone);
} else {
- this_fields[i] = nullptr;
+ this_field = nullptr;
}
}
}
@@ -442,8 +443,8 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
// Merge the information we have about the fields.
- FieldsMerge(this->fields_, that->fields_, zone);
- FieldsMerge(this->const_fields_, that->const_fields_, zone);
+ FieldsMerge(&this->fields_, that->fields_, zone);
+ FieldsMerge(&this->const_fields_, that->const_fields_, zone);
// Merge the information we have about the maps.
if (this->maps_) {
@@ -923,20 +924,23 @@ Reduction LoadElimination::ReduceStoreField(Node* node,
FieldInfo const* lookup_result =
state->LookupField(object, field_index, constness);
- if (lookup_result && constness == PropertyConstness::kMutable) {
+ if (lookup_result && (constness == PropertyConstness::kMutable ||
+ V8_ENABLE_DOUBLE_CONST_STORE_CHECK_BOOL)) {
// At runtime, we should never encounter
// - any store replacing existing info with a different, incompatible
// representation, nor
// - two consecutive const stores.
// However, we may see such code statically, so we guard against
// executing it by emitting Unreachable.
- // TODO(gsps): Re-enable the double const store check once we have
- // identified other FieldAccesses that should be marked mutable
- // instead of const (cf. JSCreateLowering::AllocateFastLiteral).
+ // TODO(gsps): Re-enable the double const store check even for
+ // non-debug builds once we have identified other FieldAccesses
+ // that should be marked mutable instead of const
+ // (cf. JSCreateLowering::AllocateFastLiteral).
bool incompatible_representation =
!lookup_result->name.is_null() &&
!IsCompatible(representation, lookup_result->representation);
- if (incompatible_representation) {
+ if (incompatible_representation ||
+ constness == PropertyConstness::kConst) {
Node* control = NodeProperties::GetControlInput(node);
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 7658d01365..4ad1fa64a2 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -233,7 +233,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool FieldsEquals(AbstractFields const& this_fields,
AbstractFields const& that_fields) const;
- void FieldsMerge(AbstractFields& this_fields,
+ void FieldsMerge(AbstractFields* this_fields,
AbstractFields const& that_fields, Zone* zone);
AbstractElements const* elements_ = nullptr;
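FieldsMerge above (and the NOLINT on TryBuildSoftDeopt earlier in this commit) address the same lint rule: the Google C++ style checker flags mutable non-const reference parameters (runtime/references). Switching an in/out argument to a pointer makes the mutation visible at the call site. A trivial illustration, unrelated to any V8 type:

#include <cstddef>
#include <vector>

// Merge 'src' into '*dst'. Taking a pointer rather than a non-const reference
// means the caller writes MergeInto(&a, b), which makes it obvious at the
// call site that 'a' is modified.
void MergeInto(std::vector<int>* dst, const std::vector<int>& src) {
  for (std::size_t i = 0; i < dst->size() && i < src.size(); ++i) {
    (*dst)[i] += src[i];
  }
}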
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index d6b88b13f5..41d50549b3 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-analysis.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -12,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
#define OFFSET(x) ((x)&0x1F)
@@ -51,7 +55,8 @@ struct TempLoopInfo {
// marks on edges into/out-of the loop header nodes.
class LoopFinderImpl {
public:
- LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
+ LoopFinderImpl(Graph* graph, LoopTree* loop_tree, TickCounter* tick_counter,
+ Zone* zone)
: zone_(zone),
end_(graph->end()),
queue_(zone),
@@ -63,7 +68,8 @@ class LoopFinderImpl {
loops_found_(0),
width_(0),
backward_(nullptr),
- forward_(nullptr) {}
+ forward_(nullptr),
+ tick_counter_(tick_counter) {}
void Run() {
PropagateBackward();
@@ -116,6 +122,7 @@ class LoopFinderImpl {
int width_;
uint32_t* backward_;
uint32_t* forward_;
+ TickCounter* const tick_counter_;
int num_nodes() {
return static_cast<int>(loop_tree_->node_to_loop_num_.size());
@@ -183,6 +190,7 @@ class LoopFinderImpl {
Queue(end_);
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
info(node);
queue_.pop_front();
@@ -301,6 +309,7 @@ class LoopFinderImpl {
}
// Propagate forward on paths that were backward reachable from backedges.
while (!queue_.empty()) {
+ tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop_front();
queued_.Set(node, false);
@@ -512,11 +521,11 @@ class LoopFinderImpl {
}
};
-
-LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
+LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* zone) {
LoopTree* loop_tree =
new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
- LoopFinderImpl finder(graph, loop_tree, zone);
+ LoopFinderImpl finder(graph, loop_tree, tick_counter, zone);
finder.Run();
if (FLAG_trace_turbo_loop) {
finder.Print();
@@ -524,7 +533,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
return loop_tree;
}
-
Node* LoopTree::HeaderNode(Loop* loop) {
Node* first = *HeaderNodes(loop).begin();
if (first->opcode() == IrOpcode::kLoop) return first;
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
index 620a9554e0..043833a54c 100644
--- a/deps/v8/src/compiler/loop-analysis.h
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -13,6 +13,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// TODO(titzer): don't assume entry edges have a particular index.
@@ -156,7 +159,8 @@ class LoopTree : public ZoneObject {
class V8_EXPORT_PRIVATE LoopFinder {
public:
// Build a loop tree for the entire graph.
- static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
+ static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
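The TickCounter threaded through LoopFinder here (and through MemoryOptimizer below) is ticked once per work-list iteration, presumably so long-running graph walks have a single progress hook. A sketch of that plumbing with stand-in types; it is illustrative only and does not reproduce V8's TickCounter:

#include <cstddef>
#include <deque>

class TickCounter {
 public:
  void DoTick() { ++ticks_; }
  std::size_t ticks() const { return ticks_; }

 private:
  std::size_t ticks_ = 0;
};

// A long-running fixpoint loop reports progress by ticking once per node it
// pulls off the queue, exactly where the diff inserts tick_counter_->DoTick().
void Drain(std::deque<int>* queue, TickCounter* tick_counter) {
  while (!queue->empty()) {
    tick_counter->DoTick();
    queue->pop_front();
  }
}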
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index f8e78b2169..80205f80b6 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -240,6 +240,7 @@ class MachineRepresentationInferrer {
MachineType::PointerRepresentation();
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
@@ -428,6 +429,7 @@ class MachineRepresentationChecker {
MachineRepresentation::kWord64);
break;
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kTaggedPoisonOnSpeculation:
CheckValueInputIsTagged(node, 0);
break;
@@ -556,7 +558,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
@@ -700,6 +702,7 @@ class MachineRepresentationChecker {
case IrOpcode::kThrow:
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
+ case IrOpcode::kStaticAssert:
break;
default:
if (node->op()->ValueInputCount() != 0) {
@@ -748,6 +751,11 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressedSigned:
return;
+ case MachineRepresentation::kNone:
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
+ break;
default:
break;
}
@@ -851,6 +859,9 @@ class MachineRepresentationChecker {
case MachineRepresentation::kCompressedPointer:
return;
case MachineRepresentation::kNone: {
+ if (input->opcode() == IrOpcode::kCompressedHeapConstant) {
+ return;
+ }
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index a6a8e87cf4..f720c29084 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -710,7 +710,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64Compare(node);
case IrOpcode::kFloat64RoundDown:
return ReduceFloat64RoundDown(node);
- case IrOpcode::kBitcastTaggedToWord: {
+ case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord: {
NodeMatcher m(node->InputAt(0));
if (m.IsBitcastWordToTaggedSigned()) {
RelaxEffectsAndControls(node);
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d2ddedc8fa..f447861aca 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -140,6 +140,7 @@ MachineType AtomicOpType(Operator const* op) {
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastTaggedSignedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -244,6 +245,13 @@ MachineType AtomicOpType(Operator const* op) {
V(Word32PairShl, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairShr, Operator::kNoProperties, 3, 0, 2) \
V(Word32PairSar, Operator::kNoProperties, 3, 0, 2) \
+ V(F64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Abs, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(F64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(F64x2Lt, Operator::kNoProperties, 2, 0, 1) \
+ V(F64x2Le, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(F32x4SConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
@@ -261,6 +269,17 @@ MachineType AtomicOpType(Operator const* op) {
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Le, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Splat, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Neg, Operator::kNoProperties, 1, 0, 1) \
+ V(I64x2Add, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Sub, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2Mul, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Eq, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2Ne, Operator::kCommutative, 2, 0, 1) \
+ V(I64x2GtS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeS, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GtU, Operator::kNoProperties, 2, 0, 1) \
+ V(I64x2GeU, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Splat, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertF32x4, Operator::kNoProperties, 1, 0, 1) \
V(I32x4SConvertI16x8Low, Operator::kNoProperties, 1, 0, 1) \
@@ -338,6 +357,8 @@ MachineType AtomicOpType(Operator const* op) {
V(S128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(S128Not, Operator::kNoProperties, 1, 0, 1) \
V(S128Select, Operator::kNoProperties, 3, 0, 1) \
+ V(S1x2AnyTrue, Operator::kNoProperties, 1, 0, 1) \
+ V(S1x2AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x4AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1) \
@@ -439,12 +460,15 @@ MachineType AtomicOpType(Operator const* op) {
V(Exchange)
#define SIMD_LANE_OP_LIST(V) \
+ V(F64x2, 2) \
V(F32x4, 4) \
+ V(I64x2, 2) \
V(I32x4, 4) \
V(I16x8, 8) \
V(I8x16, 16)
#define SIMD_FORMAT_LIST(V) \
+ V(64x2, 64) \
V(32x4, 32) \
V(16x8, 16) \
V(8x16, 8)
@@ -754,6 +778,14 @@ struct MachineOperatorGlobalCache {
};
Word32AtomicPairCompareExchangeOperator kWord32AtomicPairCompareExchange;
+ struct MemoryBarrierOperator : public Operator {
+ MemoryBarrierOperator()
+ : Operator(IrOpcode::kMemoryBarrier,
+ Operator::kNoDeopt | Operator::kNoThrow, "MemoryBarrier", 0,
+ 1, 1, 0, 1, 0) {}
+ };
+ MemoryBarrierOperator kMemoryBarrier;
+
// The {BitcastWordToTagged} operator must not be marked as pure (especially
// not idempotent), because otherwise the splitting logic in the Scheduler
// might decide to split these operators, thus potentially creating live
@@ -807,12 +839,12 @@ struct MachineOperatorGlobalCache {
};
Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
- struct DebugAbortOperator : public Operator {
- DebugAbortOperator()
- : Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
- 1, 1, 0, 1, 0) {}
+ struct AbortCSAAssertOperator : public Operator {
+ AbortCSAAssertOperator()
+ : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
+ "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
};
- DebugAbortOperator kDebugAbort;
+ AbortCSAAssertOperator kAbortCSAAssert;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1005,8 +1037,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::DebugAbort() {
- return &cache_.kDebugAbort;
+const Operator* MachineOperatorBuilder::AbortCSAAssert() {
+ return &cache_.kAbortCSAAssert;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
@@ -1017,6 +1049,10 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
+const Operator* MachineOperatorBuilder::MemBarrier() {
+ return &cache_.kMemoryBarrier;
+}
+
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
LoadRepresentation rep) {
#define LOAD(Type) \
@@ -1300,6 +1336,11 @@ const Operator* MachineOperatorBuilder::S8x16Shuffle(
2, 0, 0, 1, 0, 0, array);
}
+const uint8_t* S8x16ShuffleOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kS8x16Shuffle, op->opcode());
+ return OpParameter<uint8_t*>(op);
+}
+
#undef PURE_BINARY_OP_LIST_32
#undef PURE_BINARY_OP_LIST_64
#undef MACHINE_PURE_OP_LIST
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 8b1250dd30..0f81301206 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -112,6 +112,9 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op)
+ V8_WARN_UNUSED_RESULT;
+
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -216,7 +219,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
AlignmentRequirements::FullUnalignedAccessSupport());
const Operator* Comment(const char* msg);
- const Operator* DebugAbort();
+ const Operator* AbortCSAAssert();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
@@ -295,9 +298,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Uint64LessThanOrEqual();
const Operator* Uint64Mod();
- // This operator reinterprets the bits of a tagged pointer as word.
+ // This operator reinterprets the bits of a tagged pointer as a word.
const Operator* BitcastTaggedToWord();
+ // This operator reinterprets the bits of a Smi as a word.
+ const Operator* BitcastTaggedSignedToWord();
+
// This operator reinterprets the bits of a tagged MaybeObject pointer as
// a word.
const Operator* BitcastMaybeObjectToWord();
@@ -462,6 +468,16 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float64SilenceNaN();
// SIMD operators.
+ const Operator* F64x2Splat();
+ const Operator* F64x2Abs();
+ const Operator* F64x2Neg();
+ const Operator* F64x2ExtractLane(int32_t);
+ const Operator* F64x2ReplaceLane(int32_t);
+ const Operator* F64x2Eq();
+ const Operator* F64x2Ne();
+ const Operator* F64x2Lt();
+ const Operator* F64x2Le();
+
const Operator* F32x4Splat();
const Operator* F32x4ExtractLane(int32_t);
const Operator* F32x4ReplaceLane(int32_t);
@@ -483,6 +499,23 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4Lt();
const Operator* F32x4Le();
+ const Operator* I64x2Splat();
+ const Operator* I64x2ExtractLane(int32_t);
+ const Operator* I64x2ReplaceLane(int32_t);
+ const Operator* I64x2Neg();
+ const Operator* I64x2Shl(int32_t);
+ const Operator* I64x2ShrS(int32_t);
+ const Operator* I64x2Add();
+ const Operator* I64x2Sub();
+ const Operator* I64x2Mul();
+ const Operator* I64x2Eq();
+ const Operator* I64x2Ne();
+ const Operator* I64x2GtS();
+ const Operator* I64x2GeS();
+ const Operator* I64x2ShrU(int32_t);
+ const Operator* I64x2GtU();
+ const Operator* I64x2GeU();
+
const Operator* I32x4Splat();
const Operator* I32x4ExtractLane(int32_t);
const Operator* I32x4ReplaceLane(int32_t);
@@ -585,6 +618,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
+ const Operator* S1x2AnyTrue();
+ const Operator* S1x2AllTrue();
const Operator* S1x4AnyTrue();
const Operator* S1x4AllTrue();
const Operator* S1x8AnyTrue();
@@ -620,6 +655,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
+ // Memory barrier.
+ const Operator* MemBarrier();
+
// atomic-load [base + index]
const Operator* Word32AtomicLoad(LoadRepresentation rep);
// atomic-load [base + index]
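The new F64x2/I64x2/S1x2 entries above ride on the list-macro ("X macro") pattern visible in machine-operator.cc: each opcode is one line in a *_OP_LIST macro, and a single expansion site stamps out the cached Operator objects and declarations. A minimal standalone example of that technique; the list contents and names here are made up for illustration:

#include <cstdio>

#define PURE_OP_LIST(V) \
  V(F64x2Splat)         \
  V(F64x2Abs)           \
  V(I64x2Add)

// One expansion turns every list entry into an element of a name table; other
// expansions elsewhere could declare builder methods or cached operator
// structs from the same list.
#define DECLARE_NAME(Name) #Name,
static const char* const kPureOpNames[] = {PURE_OP_LIST(DECLARE_NAME)};
#undef DECLARE_NAME

int main() {
  for (const char* name : kPureOpNames) std::printf("%s\n", name);
  return 0;
}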
diff --git a/deps/v8/src/compiler/map-inference.cc b/deps/v8/src/compiler/map-inference.cc
index f43ba0d155..07ac95b4f7 100644
--- a/deps/v8/src/compiler/map-inference.cc
+++ b/deps/v8/src/compiler/map-inference.cc
@@ -19,7 +19,7 @@ MapInference::MapInference(JSHeapBroker* broker, Node* object, Node* effect)
: broker_(broker), object_(object) {
ZoneHandleSet<Map> maps;
auto result =
- NodeProperties::InferReceiverMaps(broker_, object_, effect, &maps);
+ NodeProperties::InferReceiverMapsUnsafe(broker_, object_, effect, &maps);
maps_.insert(maps_.end(), maps.begin(), maps.end());
maps_state_ = (result == NodeProperties::kUnreliableReceiverMaps)
? kUnreliableDontNeedGuard
@@ -65,21 +65,25 @@ bool MapInference::AllOfInstanceTypes(std::function<bool(InstanceType)> f) {
bool MapInference::AllOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- // TODO(neis): Brokerize the MapInference.
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::all_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+ return std::all_of(maps_.begin(), maps_.end(), instance_type);
}
bool MapInference::AnyOfInstanceTypesUnsafe(
std::function<bool(InstanceType)> f) const {
- AllowHandleDereference allow_handle_deref;
CHECK(HaveMaps());
- return std::any_of(maps_.begin(), maps_.end(),
- [f](Handle<Map> map) { return f(map->instance_type()); });
+ auto instance_type = [this, f](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return f(map_ref.instance_type());
+ };
+
+ return std::any_of(maps_.begin(), maps_.end(), instance_type);
}
MapHandles const& MapInference::GetMaps() {
@@ -122,7 +126,10 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
const VectorSlotPair& feedback) {
if (Safe()) return true;
- auto is_stable = [](Handle<Map> map) { return map->is_stable(); };
+ auto is_stable = [this](Handle<Map> map) {
+ MapRef map_ref(broker_, map);
+ return map_ref.is_stable();
+ };
if (dependencies != nullptr &&
std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
for (Handle<Map> map : maps_) {
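The MapInference changes above replace direct handle dereferences (which needed AllowHandleDereference) with lookups through a broker-backed MapRef, so the instance-type and stability queries no longer touch the JS heap directly. A rough standalone sketch of that indirection; the types are stand-ins, not V8's:

#include <algorithm>
#include <functional>
#include <vector>

struct Map { int instance_type; };
struct Broker {};  // stands in for JSHeapBroker's serialized view of the heap

class MapRef {
 public:
  MapRef(Broker* broker, const Map* map) : broker_(broker), map_(map) {}
  // In real V8 this would consult data the broker serialized up front rather
  // than dereferencing a heap handle; here it just reads the stand-in struct.
  int instance_type() const { return map_->instance_type; }

 private:
  Broker* broker_;
  const Map* map_;
};

bool AllOfInstanceTypes(Broker* broker, const std::vector<const Map*>& maps,
                        const std::function<bool(int)>& f) {
  return std::all_of(maps.begin(), maps.end(), [broker, &f](const Map* map) {
    return f(MapRef(broker, map).instance_type());
  });
}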
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 29cbb4d26c..368c060c1d 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/memory-optimizer.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -20,7 +21,8 @@ namespace compiler {
MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name)
+ const char* function_debug_name,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
@@ -29,7 +31,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
graph_assembler_(jsgraph, nullptr, nullptr, zone),
poisoning_level_(poisoning_level),
allocation_folding_(allocation_folding),
- function_debug_name_(function_debug_name) {}
+ function_debug_name_(function_debug_name),
+ tick_counter_(tick_counter) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -99,7 +102,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
@@ -108,6 +111,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoad:
case IrOpcode::kLoadElement:
case IrOpcode::kLoadField:
+ case IrOpcode::kLoadFromObject:
case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
@@ -118,6 +122,7 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStore:
case IrOpcode::kStoreElement:
case IrOpcode::kStoreField:
+ case IrOpcode::kStoreToObject:
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
@@ -214,6 +219,7 @@ Node* EffectPhiForPhi(Node* phi) {
} // namespace
void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
+ tick_counter_->DoTick();
DCHECK(!node->IsDead());
DCHECK_LT(0, node->op()->EffectInputCount());
switch (node->opcode()) {
@@ -296,6 +302,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
+ Node* allocate_builtin;
+ if (allocation_type == AllocationType::kYoung) {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+ }
+ } else {
+ if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+ allocate_builtin = __ AllocateInOldGenerationStubConstant();
+ } else {
+ allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+ }
+ }
+
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation_type == AllocationType::kYoung
@@ -371,11 +392,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -384,7 +400,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
allocate_operator_.set(common()->Call(call_descriptor));
}
Node* vfalse = __ BitcastTaggedToWord(
- __ Call(allocate_operator_.get(), target, size));
+ __ Call(allocate_operator_.get(), allocate_builtin, size));
vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
__ Goto(&done, vfalse);
}
@@ -434,11 +450,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
- Node* target = allocation_type == AllocationType::kYoung
- ? __
- AllocateInYoungGenerationStubConstant()
- : __
- AllocateInOldGenerationStubConstant();
if (!allocate_operator_.is_set()) {
auto descriptor = AllocateDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -446,7 +457,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
CallDescriptor::kCanUseRoots, Operator::kNoThrow);
allocate_operator_.set(common()->Call(call_descriptor));
}
- __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
+ __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
__ Bind(&done);
value = done.PhiAt(0);
@@ -483,8 +494,6 @@ void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
- Node* offset = node->InputAt(1);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
EnqueueUses(node, state);
}
@@ -494,9 +503,7 @@ void MemoryOptimizer::VisitStoreToObject(Node* node,
DCHECK_EQ(IrOpcode::kStoreToObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
- Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
- node->ReplaceInput(1, __ IntSub(offset, __ IntPtrConstant(kHeapObjectTag)));
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
NodeProperties::ChangeOp(
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index cbefcb67de..71f33fa3d7 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -10,6 +10,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -36,7 +39,7 @@ class MemoryOptimizer final {
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
- const char* function_debug_name);
+ const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
void Optimize();
@@ -158,6 +161,7 @@ class MemoryOptimizer final {
PoisoningMitigationLevel poisoning_level_;
AllocationFolding allocation_folding_;
const char* function_debug_name_;
+ TickCounter* const tick_counter_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
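In the VisitAllocateRaw hunks above, the allocation builtin is now chosen up front from a 2x2 table (young vs. old generation, large objects allowed or not) instead of the previous two-way pick inside the slow path. The decision itself reduces to the following, written with hypothetical enum names rather than V8's graph-assembler constants:

enum class Generation { kYoung, kOld };
enum class AllowLargeObjects { kTrue, kFalse };
enum class AllocateBuiltin {
  kAllocateInYoungGeneration,
  kAllocateRegularInYoungGeneration,
  kAllocateInOldGeneration,
  kAllocateRegularInOldGeneration,
};

// Mirrors the four-way choice added to VisitAllocateRaw: the "regular"
// variants are used when large-object allocation is not allowed.
AllocateBuiltin ChooseAllocateBuiltin(Generation gen, AllowLargeObjects large) {
  if (gen == Generation::kYoung) {
    return large == AllowLargeObjects::kTrue
               ? AllocateBuiltin::kAllocateInYoungGeneration
               : AllocateBuiltin::kAllocateRegularInYoungGeneration;
  }
  return large == AllowLargeObjects::kTrue
             ? AllocateBuiltin::kAllocateInOldGeneration
             : AllocateBuiltin::kAllocateRegularInOldGeneration;
}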
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index d6528c553a..1e00ec00f4 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -5,6 +5,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/map-inference.h"
@@ -392,7 +393,7 @@ base::Optional<MapRef> NodeProperties::GetJSCreateMap(JSHeapBroker* broker,
}
// static
-NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return) {
HeapObjectMatcher m(receiver);
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 4a23b6781d..a660fe7022 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -151,7 +151,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
kReliableReceiverMaps, // Receiver maps can be trusted.
kUnreliableReceiverMaps // Receiver maps might have changed (side-effect).
};
- static InferReceiverMapsResult InferReceiverMaps(
+ // DO NOT USE InferReceiverMapsUnsafe IN NEW CODE. Use MapInference instead.
+ static InferReceiverMapsResult InferReceiverMapsUnsafe(
JSHeapBroker* broker, Node* receiver, Node* effect,
ZoneHandleSet<Map>* maps_return);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index 50cfdf6248..7688379e9f 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -303,7 +303,13 @@ void Node::Print() const {
void Node::Print(std::ostream& os) const {
os << *this << std::endl;
for (Node* input : this->inputs()) {
- os << " " << *input << std::endl;
+ os << " ";
+ if (input) {
+ os << *input;
+ } else {
+ os << "(NULL)";
+ }
+ os << std::endl;
}
}
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 9ac8ec581f..d621e23e3a 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -45,6 +45,7 @@
V(NumberConstant) \
V(PointerConstant) \
V(HeapConstant) \
+ V(CompressedHeapConstant) \
V(RelocatableInt32Constant) \
V(RelocatableInt64Constant)
@@ -231,6 +232,7 @@
// Opcodes for VirtualMachine-level operators.
#define SIMPLIFIED_CHANGE_OP_LIST(V) \
+ V(ChangeCompressedSignedToInt32) \
V(ChangeTaggedSignedToInt32) \
V(ChangeTaggedSignedToInt64) \
V(ChangeTaggedToInt32) \
@@ -240,6 +242,7 @@
V(ChangeTaggedToTaggedSigned) \
V(ChangeCompressedToTaggedSigned) \
V(ChangeTaggedToCompressedSigned) \
+ V(ChangeInt31ToCompressedSigned) \
V(ChangeInt31ToTaggedSigned) \
V(ChangeInt32ToTagged) \
V(ChangeInt64ToTagged) \
@@ -249,6 +252,8 @@
V(ChangeFloat64ToTaggedPointer) \
V(ChangeTaggedToBit) \
V(ChangeBitToTagged) \
+ V(ChangeUint64ToBigInt) \
+ V(TruncateBigIntToUint64) \
V(TruncateTaggedToWord32) \
V(TruncateTaggedToFloat64) \
V(TruncateTaggedToBit) \
@@ -262,6 +267,7 @@
V(CheckedUint32Div) \
V(CheckedUint32Mod) \
V(CheckedInt32Mul) \
+ V(CheckedInt32ToCompressedSigned) \
V(CheckedInt32ToTaggedSigned) \
V(CheckedInt64ToInt32) \
V(CheckedInt64ToTaggedSigned) \
@@ -318,6 +324,8 @@
V(NumberMin) \
V(NumberPow)
+#define SIMPLIFIED_BIGINT_BINOP_LIST(V) V(BigIntAdd)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
V(SpeculativeNumberAdd) \
V(SpeculativeNumberSubtract) \
@@ -369,6 +377,11 @@
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
+#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \
+ V(BigIntAsUintN) \
+ V(BigIntNegate) \
+ V(CheckBigInt)
+
#define SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) V(SpeculativeToNumber)
#define SIMPLIFIED_OTHER_OP_LIST(V) \
@@ -382,6 +395,7 @@
V(StringCodePointAt) \
V(StringFromSingleCharCode) \
V(StringFromSingleCodePoint) \
+ V(StringFromCodePointAt) \
V(StringIndexOf) \
V(StringLength) \
V(StringToLowerCaseIntl) \
@@ -461,16 +475,24 @@
V(FindOrderedHashMapEntryForInt32Key) \
V(PoisonIndex) \
V(RuntimeAbort) \
+ V(AssertType) \
V(DateNow)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) V(SpeculativeBigIntAdd)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate)
+
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
SIMPLIFIED_CHECKED_OP_LIST(V) \
SIMPLIFIED_COMPARE_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_BINOP_LIST(V) \
+ SIMPLIFIED_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(V) \
SIMPLIFIED_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_BIGINT_UNOP_LIST(V) \
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
SIMPLIFIED_OTHER_OP_LIST(V)
// Opcodes for Machine-level operators.
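
The V(...) lists above, including the new SIMPLIFIED_BIGINT_* lists, are X-macros: each list applies a caller-supplied macro V to every opcode name, and the composite SIMPLIFIED_OP_LIST simply chains the sub-lists. A minimal sketch of how such a list expands, with illustrative names rather than the real opcode set:

#include <iostream>

#define DEMO_BIGINT_OP_LIST(V) \
  V(BigIntAdd)                 \
  V(BigIntNegate)

// First expansion: declare one enumerator per opcode.
enum class DemoOpcode {
#define DECLARE_OPCODE(Name) k##Name,
  DEMO_BIGINT_OP_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

int main() {
  // Second expansion of the same list: print every opcode name.
#define PRINT_OPCODE(Name) std::cout << #Name << "\n";
  DEMO_BIGINT_OP_LIST(PRINT_OPCODE)
#undef PRINT_OPCODE
}
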
@@ -616,7 +638,7 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_WORD64_ATOMIC_OP_LIST(V) \
- V(DebugAbort) \
+ V(AbortCSAAssert) \
V(DebugBreak) \
V(Comment) \
V(Load) \
@@ -631,6 +653,7 @@
V(Word64ReverseBytes) \
V(Int64AbsWithOverflow) \
V(BitcastTaggedToWord) \
+ V(BitcastTaggedSignedToWord) \
V(BitcastWordToTagged) \
V(BitcastWordToTaggedSigned) \
V(TruncateFloat64ToWord32) \
@@ -692,6 +715,7 @@
V(Word32PairSar) \
V(ProtectedLoad) \
V(ProtectedStore) \
+ V(MemoryBarrier) \
V(Word32AtomicLoad) \
V(Word32AtomicStore) \
V(Word32AtomicExchange) \
@@ -718,6 +742,15 @@
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
+ V(F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
V(F32x4ReplaceLane) \
@@ -739,6 +772,22 @@
V(F32x4Le) \
V(F32x4Gt) \
V(F32x4Ge) \
+ V(I64x2Splat) \
+ V(I64x2ExtractLane) \
+ V(I64x2ReplaceLane) \
+ V(I64x2Neg) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul) \
+ V(I64x2Eq) \
+ V(I64x2Ne) \
+ V(I64x2GtS) \
+ V(I64x2GeS) \
+ V(I64x2ShrU) \
+ V(I64x2GtU) \
+ V(I64x2GeU) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
V(I32x4ReplaceLane) \
@@ -844,6 +893,8 @@
V(S128Xor) \
V(S128Select) \
V(S8x16Shuffle) \
+ V(S1x2AnyTrue) \
+ V(S1x2AllTrue) \
V(S1x4AnyTrue) \
V(S1x4AllTrue) \
V(S1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 475623f76b..8cb991ceb7 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/operation-typer.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/type-cache.h"
#include "src/compiler/types.h"
#include "src/execution/isolate.h"
@@ -259,7 +260,8 @@ Type OperationTyper::ConvertReceiver(Type type) {
type = Type::Intersect(type, Type::Receiver(), zone());
if (maybe_primitive) {
// ConvertReceiver maps null and undefined to the JSGlobalProxy of the
- // target function, and all other primitives are wrapped into a JSValue.
+ // target function, and all other primitives are wrapped into a
+ // JSPrimitiveWrapper.
type = Type::Union(type, Type::OtherObject(), zone());
}
return type;
@@ -577,6 +579,13 @@ Type OperationTyper::NumberSilenceNaN(Type type) {
return type;
}
+Type OperationTyper::BigIntAsUintN(Type type) {
+ DCHECK(type.Is(Type::BigInt()));
+ return Type::BigInt();
+}
+
+Type OperationTyper::CheckBigInt(Type type) { return Type::BigInt(); }
+
Type OperationTyper::NumberAdd(Type lhs, Type rhs) {
DCHECK(lhs.Is(Type::Number()));
DCHECK(rhs.Is(Type::Number()));
@@ -1111,6 +1120,26 @@ SPECULATIVE_NUMBER_BINOP(NumberShiftRight)
SPECULATIVE_NUMBER_BINOP(NumberShiftRightLogical)
#undef SPECULATIVE_NUMBER_BINOP
+Type OperationTyper::BigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::BigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntAdd(Type lhs, Type rhs) {
+ if (lhs.IsNone() || rhs.IsNone()) return Type::None();
+ return Type::BigInt();
+}
+
+Type OperationTyper::SpeculativeBigIntNegate(Type type) {
+ if (type.IsNone()) return type;
+ return Type::BigInt();
+}
+
Type OperationTyper::SpeculativeToNumber(Type type) {
return ToNumber(Type::Intersect(type, Type::NumberOrOddball(), zone()));
}
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index a905662ad1..728e297a1b 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -43,14 +43,18 @@ class V8_EXPORT_PRIVATE OperationTyper {
// Unary operators.
#define DECLARE_METHOD(Name) Type Name(Type type);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
DECLARE_METHOD(ConvertReceiver)
#undef DECLARE_METHOD
-// Number binary operators.
+// Numeric binary operators.
#define DECLARE_METHOD(Name) Type Name(Type lhs, Type rhs);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
// Comparison operators.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e771cef123..eb060b71e1 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -16,6 +16,7 @@
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/register-configuration.h"
+#include "src/compiler/add-type-assertions-reducer.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/frame-elider.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -34,6 +35,7 @@
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/csa-load-elimination.h"
#include "src/compiler/dead-code-elimination.h"
#include "src/compiler/decompression-elimination.h"
#include "src/compiler/effect-control-linearizer.h"
@@ -114,7 +116,8 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, ZONE_NAME),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone())),
+ broker_(new JSHeapBroker(isolate_, info_->zone(),
+ info_->trace_heap_broker_enabled())),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
assembler_options_(AssemblerOptions::Default(isolate)) {
@@ -266,7 +269,7 @@ class PipelineData {
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
MachineGraph* mcgraph() const { return mcgraph_; }
- Handle<Context> native_context() const {
+ Handle<NativeContext> native_context() const {
return handle(info()->native_context(), isolate());
}
Handle<JSGlobalObject> global_object() const {
@@ -324,7 +327,8 @@ class PipelineData {
Typer* CreateTyper() {
DCHECK_NULL(typer_);
- typer_ = new Typer(broker(), typer_flags_, graph());
+ typer_ =
+ new Typer(broker(), typer_flags_, graph(), &info()->tick_counter());
return typer_;
}
@@ -397,7 +401,8 @@ class PipelineData {
DCHECK_NULL(frame_);
int fixed_frame_size = 0;
if (call_descriptor != nullptr) {
- fixed_frame_size = call_descriptor->CalculateFixedFrameSize();
+ fixed_frame_size =
+ call_descriptor->CalculateFixedFrameSize(info()->code_kind());
}
frame_ = new (codegen_zone()) Frame(fixed_frame_size);
}
@@ -408,7 +413,8 @@ class PipelineData {
DCHECK_NULL(register_allocation_data_);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), flags, debug_name());
+ sequence(), flags, &info()->tick_counter(),
+ debug_name());
}
void InitializeOsrHelper() {
@@ -1040,6 +1046,119 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
code->set_can_have_weak_objects(true);
}
+class WasmHeapStubCompilationJob final : public OptimizedCompilationJob {
+ public:
+ WasmHeapStubCompilationJob(Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph,
+ Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions)
+ // Note that the OptimizedCompilationInfo is not initialized at the time
+ // we pass it to the CompilationJob constructor, but it is not
+ // dereferenced there.
+ : OptimizedCompilationJob(isolate->stack_guard()->real_climit(), &info_,
+ "TurboFan"),
+ debug_name_(std::move(debug_name)),
+ info_(CStrVector(debug_name_.get()), graph->zone(), kind),
+ call_descriptor_(call_descriptor),
+ zone_stats_(isolate->allocator()),
+ zone_(std::move(zone)),
+ graph_(graph),
+ data_(&zone_stats_, &info_, isolate, graph_, nullptr, source_positions,
+ new (zone_.get()) NodeOriginTable(graph_), nullptr, options),
+ pipeline_(&data_) {}
+
+ ~WasmHeapStubCompilationJob() = default;
+
+ protected:
+ Status PrepareJobImpl(Isolate* isolate) final;
+ Status ExecuteJobImpl() final;
+ Status FinalizeJobImpl(Isolate* isolate) final;
+
+ private:
+ std::unique_ptr<char[]> debug_name_;
+ OptimizedCompilationInfo info_;
+ CallDescriptor* call_descriptor_;
+ ZoneStats zone_stats_;
+ std::unique_ptr<Zone> zone_;
+ Graph* graph_;
+ PipelineData data_;
+ PipelineImpl pipeline_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmHeapStubCompilationJob);
+};
+
+// static
+std::unique_ptr<OptimizedCompilationJob>
+Pipeline::NewWasmHeapStubCompilationJob(Isolate* isolate,
+ CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone,
+ Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name,
+ const AssemblerOptions& options,
+ SourcePositionTable* source_positions) {
+ return base::make_unique<WasmHeapStubCompilationJob>(
+ isolate, call_descriptor, std::move(zone), graph, kind,
+ std::move(debug_name), options, source_positions);
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::PrepareJobImpl(
+ Isolate* isolate) {
+ std::unique_ptr<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
+ pipeline_statistics.reset(new PipelineStatistics(
+ &info_, isolate->GetTurboStatistics(), &zone_stats_));
+ pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
+ }
+ if (info_.trace_turbo_json_enabled() || info_.trace_turbo_graph_enabled()) {
+ CodeTracer::Scope tracing_scope(data_.GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling method " << info_.GetDebugName().get()
+ << " using TurboFan" << std::endl;
+ }
+ if (info_.trace_turbo_graph_enabled()) { // Simple textual RPO.
+ StdoutStream{} << "-- wasm stub " << Code::Kind2String(info_.code_kind())
+ << " graph -- " << std::endl
+ << AsRPO(*data_.graph());
+ }
+
+ if (info_.trace_turbo_json_enabled()) {
+ TurboJsonFile json_of(&info_, std::ios_base::trunc);
+ json_of << "{\"function\":\"" << info_.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ }
+ pipeline_.RunPrintAndVerify("V8.WasmMachineCode", true);
+ return CompilationJob::SUCCEEDED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::ExecuteJobImpl() {
+ pipeline_.ComputeScheduledGraph();
+ if (pipeline_.SelectInstructionsAndAssemble(call_descriptor_)) {
+ return CompilationJob::SUCCEEDED;
+ }
+ return CompilationJob::FAILED;
+}
+
+CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
+ Isolate* isolate) {
+ Handle<Code> code;
+ if (pipeline_.FinalizeCode(call_descriptor_).ToHandle(&code) &&
+ pipeline_.CommitDependencies(code)) {
+ info_.SetCode(code);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ code->Disassemble(compilation_info()->GetDebugName().get(), os);
+ }
+#endif
+ return SUCCEEDED;
+ }
+ return FAILED;
+}
+
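
The new WasmHeapStubCompilationJob follows the standard three-phase OptimizedCompilationJob protocol: PrepareJobImpl runs on the main thread and sets up tracing, ExecuteJobImpl does the scheduling and instruction selection and may run off the main thread, and FinalizeJobImpl materializes the Code object back on the main thread. A stripped-down sketch of that protocol, with simplified names standing in for V8's job machinery:

#include <iostream>

class DemoCompilationJob {
 public:
  enum class Status { kSucceeded, kFailed };

  // Main thread: gather inputs, set up tracing.
  Status Prepare() { prepared_ = true; return Status::kSucceeded; }
  // Possibly a background thread: the heavy compilation work.
  Status Execute() { return prepared_ ? Status::kSucceeded : Status::kFailed; }
  // Main thread again: turn the result into the final code object.
  Status Finalize() { std::cout << "code finalized\n"; return Status::kSucceeded; }

 private:
  bool prepared_ = false;
};

int main() {
  DemoCompilationJob job;
  if (job.Prepare() == DemoCompilationJob::Status::kSucceeded &&
      job.Execute() == DemoCompilationJob::Status::kSucceeded) {
    job.Finalize();
  }
}
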
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
PipelineRunScope scope(this->data_, Phase::phase_name());
@@ -1065,7 +1184,7 @@ struct GraphBuilderPhase {
handle(data->info()->closure()->feedback_vector(), data->isolate()),
data->info()->osr_offset(), data->jsgraph(), frequency,
data->source_positions(), data->native_context(),
- SourcePosition::kNotInlined, flags);
+ SourcePosition::kNotInlined, flags, &data->info()->tick_counter());
}
};
@@ -1102,7 +1221,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
Isolate* isolate = data->isolate();
OptimizedCompilationInfo* info = data->info();
- GraphReducer graph_reducer(temp_zone, data->graph(),
+ GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1196,6 +1315,7 @@ struct UntyperPhase {
}
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -1216,6 +1336,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
@@ -1242,13 +1363,13 @@ struct SerializationPhase {
if (data->info()->is_source_positions_enabled()) {
flags |= SerializerForBackgroundCompilationFlag::kCollectSourcePositions;
}
- if (data->info()->is_osr()) {
- flags |= SerializerForBackgroundCompilationFlag::kOsr;
+ if (data->info()->is_analyze_environment_liveness()) {
+ flags |=
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness;
}
- SerializerForBackgroundCompilation serializer(
- data->broker(), data->dependencies(), temp_zone,
- data->info()->closure(), flags);
- serializer.Run();
+ RunSerializerForBackgroundCompilation(data->broker(), data->dependencies(),
+ temp_zone, data->info()->closure(),
+ flags, data->info()->osr_offset());
}
};
@@ -1257,6 +1378,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1292,9 +1414,12 @@ struct EscapeAnalysisPhase {
static const char* phase_name() { return "V8.TFEscapeAnalysis"; }
void Run(PipelineData* data, Zone* temp_zone) {
- EscapeAnalysis escape_analysis(data->jsgraph(), temp_zone);
+ EscapeAnalysis escape_analysis(data->jsgraph(),
+ &data->info()->tick_counter(), temp_zone);
escape_analysis.ReduceGraph();
- GraphReducer reducer(temp_zone, data->graph(), data->jsgraph()->Dead());
+ GraphReducer reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
@@ -1305,13 +1430,28 @@ struct EscapeAnalysisPhase {
}
};
+struct TypeAssertionsPhase {
+ static const char* phase_name() { return "V8.TFTypeAssertions"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &type_assertions);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct SimplifiedLoweringPhase {
static const char* phase_name() { return "V8.TFSimplifiedLowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel());
+ data->info()->GetPoisoningMitigationLevel(),
+ &data->info()->tick_counter());
lowering.LowerAllNodes();
}
};
@@ -1325,8 +1465,8 @@ struct LoopPeelingPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- LoopTree* loop_tree =
- LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
+ LoopTree* loop_tree = LoopFinder::BuildLoopTree(
+ data->jsgraph()->graph(), &data->info()->tick_counter(), temp_zone);
LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
data->source_positions(), data->node_origins())
.PeelInnerLoopsOfTree();
@@ -1346,6 +1486,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -1358,6 +1499,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1384,7 +1526,8 @@ struct ControlFlowOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
ControlFlowOptimizer optimizer(data->graph(), data->common(),
- data->machine(), temp_zone);
+ data->machine(),
+ &data->info()->tick_counter(), temp_zone);
optimizer.Optimize();
}
};
@@ -1406,8 +1549,9 @@ struct EffectControlLinearizationPhase {
// fix the effect and control flow for nodes with low-level side
// effects (such as changing representation to tagged or
// 'floating' allocation regions.)
- Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kTempSchedule);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ temp_zone, data->graph(), Scheduler::kTempSchedule,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
TraceSchedule(data->info(), data, schedule,
"effect linearization schedule");
@@ -1433,6 +1577,7 @@ struct EffectControlLinearizationPhase {
// doing a common operator reducer and dead code elimination just before
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1455,7 +1600,8 @@ struct StoreStoreEliminationPhase {
data->jsgraph()->GetCachedNodes(&roots);
trimmer.TrimGraph(roots.begin(), roots.end());
- StoreStoreElimination::Run(data->jsgraph(), temp_zone);
+ StoreStoreElimination::Run(data->jsgraph(), &data->info()->tick_counter(),
+ temp_zone);
}
};
@@ -1464,6 +1610,7 @@ struct LoadEliminationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1513,7 +1660,7 @@ struct MemoryOptimizationPhase {
data->info()->is_allocation_folding_enabled()
? MemoryOptimizer::AllocationFolding::kDoAllocationFolding
: MemoryOptimizer::AllocationFolding::kDontAllocationFolding,
- data->debug_name());
+ data->debug_name(), &data->info()->tick_counter());
optimizer.Optimize();
}
};
@@ -1523,6 +1670,7 @@ struct LateOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1555,6 +1703,7 @@ struct MachineOperatorOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -1565,11 +1714,38 @@ struct MachineOperatorOptimizationPhase {
}
};
+struct CsaEarlyOptimizationPhase {
+ static const char* phase_name() { return "V8.CSAEarlyOptimization"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
+ data->jsgraph()->Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer,
+ data->jsgraph(), temp_zone);
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common(), temp_zone);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->broker(), data->common(),
+ data->machine(), temp_zone);
+ ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
+ CsaLoadElimination load_elimination(&graph_reducer, data->jsgraph(),
+ temp_zone);
+ AddReducer(data, &graph_reducer, &branch_condition_elimination);
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &load_elimination);
+ graph_reducer.ReduceGraph();
+ }
+};
+
struct CsaOptimizationPhase {
static const char* phase_name() { return "V8.CSAOptimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
+ &data->info()->tick_counter(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1621,9 +1797,10 @@ struct ComputeSchedulePhase {
void Run(PipelineData* data, Zone* temp_zone) {
Schedule* schedule = Scheduler::ComputeSchedule(
- temp_zone, data->graph(), data->info()->is_splitting_enabled()
- ? Scheduler::kSplitNodes
- : Scheduler::kNoFlags);
+ temp_zone, data->graph(),
+ data->info()->is_splitting_enabled() ? Scheduler::kSplitNodes
+ : Scheduler::kNoFlags,
+ &data->info()->tick_counter());
if (FLAG_turbo_verify) ScheduleVerifier::Run(schedule);
data->set_schedule(schedule);
}
@@ -1671,6 +1848,7 @@ struct InstructionSelectionPhase {
data->info()->switch_jump_table_enabled()
? InstructionSelector::kEnableSwitchJumpTable
: InstructionSelector::kDisableSwitchJumpTable,
+ &data->info()->tick_counter(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions,
@@ -1920,7 +2098,8 @@ struct PrintGraphPhase {
Schedule* schedule = data->schedule();
if (schedule == nullptr) {
schedule = Scheduler::ComputeSchedule(temp_zone, data->graph(),
- Scheduler::kNoFlags);
+ Scheduler::kNoFlags,
+ &info->tick_counter());
}
AllowHandleDereference allow_deref;
@@ -2089,6 +2268,11 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
RunPrintAndVerify(EscapeAnalysisPhase::phase_name());
}
+ if (FLAG_assert_types) {
+ Run<TypeAssertionsPhase>();
+ RunPrintAndVerify(TypeAssertionsPhase::phase_name());
+ }
+
// Perform simplified lowering. This has to run w/o the Typer decorator,
// because we cannot compute meaningful types anyways, and the computed types
// might even conflict with the representation/truncation logic.
@@ -2201,6 +2385,9 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
pipeline.Run<PrintGraphPhase>("V8.TFMachineCode");
}
+ pipeline.Run<CsaEarlyOptimizationPhase>();
+ pipeline.RunPrintAndVerify(CsaEarlyOptimizationPhase::phase_name(), true);
+
// Optimize memory access and allocation operations.
pipeline.Run<MemoryOptimizationPhase>();
pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
@@ -2331,58 +2518,6 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
}
// static
-MaybeHandle<Code> Pipeline::GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name, const AssemblerOptions& options,
- SourcePositionTable* source_positions) {
- OptimizedCompilationInfo info(CStrVector(debug_name), graph->zone(), kind);
- // Construct a pipeline for scheduling and code generation.
- ZoneStats zone_stats(isolate->allocator());
- NodeOriginTable* node_positions = new (graph->zone()) NodeOriginTable(graph);
- PipelineData data(&zone_stats, &info, isolate, graph, nullptr,
- source_positions, node_positions, nullptr, options);
- std::unique_ptr<PipelineStatistics> pipeline_statistics;
- if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
- pipeline_statistics.reset(new PipelineStatistics(
- &info, isolate->GetTurboStatistics(), &zone_stats));
- pipeline_statistics->BeginPhaseKind("V8.WasmStubCodegen");
- }
-
- PipelineImpl pipeline(&data);
-
- if (info.trace_turbo_json_enabled() ||
- info.trace_turbo_graph_enabled()) {
- CodeTracer::Scope tracing_scope(data.GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "---------------------------------------------------\n"
- << "Begin compiling method " << info.GetDebugName().get()
- << " using TurboFan" << std::endl;
- }
-
- if (info.trace_turbo_graph_enabled()) { // Simple textual RPO.
- StdoutStream{} << "-- wasm stub " << Code::Kind2String(kind) << " graph -- "
- << std::endl
- << AsRPO(*graph);
- }
-
- if (info.trace_turbo_json_enabled()) {
- TurboJsonFile json_of(&info, std::ios_base::trunc);
- json_of << "{\"function\":\"" << info.GetDebugName().get()
- << "\", \"source\":\"\",\n\"phases\":[";
- }
-
- pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
- pipeline.ComputeScheduledGraph();
-
- Handle<Code> code;
- if (pipeline.GenerateCode(call_descriptor).ToHandle(&code) &&
- pipeline.CommitDependencies(code)) {
- return code;
- }
- return MaybeHandle<Code>();
-}
-
-// static
MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
OptimizedCompilationInfo* info, Isolate* isolate,
std::unique_ptr<JSHeapBroker>* out_broker) {
@@ -2449,11 +2584,11 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
}
// static
-OptimizedCompilationJob* Pipeline::NewCompilationJob(
+std::unique_ptr<OptimizedCompilationJob> Pipeline::NewCompilationJob(
Isolate* isolate, Handle<JSFunction> function, bool has_script) {
Handle<SharedFunctionInfo> shared =
handle(function->shared(), function->GetIsolate());
- return new PipelineCompilationJob(isolate, shared, function);
+ return base::make_unique<PipelineCompilationJob>(isolate, shared, function);
}
// static
@@ -2490,13 +2625,14 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.RunPrintAndVerify("V8.WasmMachineCode", true);
data.BeginPhaseKind("V8.WasmOptimization");
- const bool is_asm_js = module->origin == wasm::kAsmJsOrigin;
+ const bool is_asm_js = is_asmjs_module(module);
if (FLAG_turbo_splitting && !is_asm_js) {
data.info()->MarkAsSplittingEnabled();
}
if (FLAG_wasm_opt || is_asm_js) {
PipelineRunScope scope(&data, "V8.WasmFullOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
@@ -2515,6 +2651,7 @@ void Pipeline::GenerateCodeForWasmFunction(
} else {
PipelineRunScope scope(&data, "V8.WasmBaseOptimization");
GraphReducer graph_reducer(scope.zone(), data.graph(),
+ &data.info()->tick_counter(),
data.mcgraph()->Dead());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
@@ -2870,8 +3007,9 @@ bool PipelineImpl::SelectInstructionsAndAssemble(
}
MaybeHandle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
- if (!SelectInstructionsAndAssemble(call_descriptor))
+ if (!SelectInstructionsAndAssemble(call_descriptor)) {
return MaybeHandle<Code>();
+ }
return FinalizeCode();
}
@@ -2928,6 +3066,9 @@ void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
if (data->info()->is_turbo_preprocess_ranges()) {
flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
}
+ if (data->info()->trace_turbo_allocation_enabled()) {
+ flags |= RegisterAllocationFlag::kTraceAllocation;
+ }
data->InitializeRegisterAllocationData(config, call_descriptor, flags);
if (info()->is_osr()) data->osr_helper()->SetupFrame(data->frame());
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 7f9a242d98..6898faaad0 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -41,9 +41,8 @@ class SourcePositionTable;
class Pipeline : public AllStatic {
public:
// Returns a new compilation job for the given JavaScript function.
- static OptimizedCompilationJob* NewCompilationJob(Isolate* isolate,
- Handle<JSFunction> function,
- bool has_script);
+ static std::unique_ptr<OptimizedCompilationJob> NewCompilationJob(
+ Isolate* isolate, Handle<JSFunction> function, bool has_script);
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
@@ -60,11 +59,11 @@ class Pipeline : public AllStatic {
const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
- // Run the pipeline on a machine graph and generate code.
- static MaybeHandle<Code> GenerateCodeForWasmHeapStub(
- Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
- Code::Kind kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ // Returns a new compilation job for a wasm heap stub.
+ static std::unique_ptr<OptimizedCompilationJob> NewWasmHeapStubCompilationJob(
+ Isolate* isolate, CallDescriptor* call_descriptor,
+ std::unique_ptr<Zone> zone, Graph* graph, Code::Kind kind,
+ std::unique_ptr<char[]> debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions = nullptr);
// Run the pipeline on a machine graph and generate code.
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index dafd481797..99a06ef874 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -127,7 +127,7 @@ Node* PropertyAccessBuilder::ResolveHolder(
PropertyAccessInfo const& access_info, Node* receiver) {
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
- return jsgraph()->Constant(holder);
+ return jsgraph()->Constant(ObjectRef(broker(), holder));
}
return receiver;
}
@@ -151,7 +151,16 @@ MachineRepresentation PropertyAccessBuilder::ConvertRepresentation(
Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
NameRef const& name, PropertyAccessInfo const& access_info,
Node* receiver) {
+ // TODO(neis): Eliminate FastPropertyAt call below by doing the lookup during
+ // access info computation. Requires extra care in the case where the
+ // receiver is the holder.
+ AllowCodeDependencyChange dependency_change_;
+ AllowHandleAllocation handle_allocation_;
+ AllowHandleDereference handle_dereference_;
+ AllowHeapAllocation heap_allocation_;
+
if (!access_info.IsDataConstant()) return nullptr;
+
// First, determine if we have a constant holder to load from.
Handle<JSObject> holder;
// If {access_info} has a holder, just use it.
@@ -165,7 +174,7 @@ Node* PropertyAccessBuilder::TryBuildLoadConstantDataField(
MapRef receiver_map = m.Ref(broker()).map();
if (std::find_if(access_info.receiver_maps().begin(),
access_info.receiver_maps().end(), [&](Handle<Map> map) {
- return map.address() == receiver_map.object().address();
+ return map.equals(receiver_map.object());
}) == access_info.receiver_maps().end()) {
// The map of the receiver is not in the feedback, let us bail out.
return nullptr;
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index dc1edc710d..277c89c932 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -556,8 +556,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::DebugAbort(Node* message) {
- AddNode(machine()->DebugAbort(), message);
+void RawMachineAssembler::AbortCSAAssert(Node* message) {
+ AddNode(machine()->AbortCSAAssert(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 67326ac730..890c38c551 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -732,6 +732,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* BitcastTaggedToWord(Node* a) {
return AddNode(machine()->BitcastTaggedToWord(), a);
}
+ Node* BitcastTaggedSignedToWord(Node* a) {
+ return AddNode(machine()->BitcastTaggedSignedToWord(), a);
+ }
Node* BitcastMaybeObjectToWord(Node* a) {
return AddNode(machine()->BitcastMaybeObjectToWord(), a);
}
@@ -1016,7 +1019,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void DebugAbort(Node* message);
+ void AbortCSAAssert(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 0822e47bba..9b401bcf43 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -19,6 +19,7 @@ RedundancyElimination::~RedundancyElimination() = default;
Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
+ case IrOpcode::kCheckBigInt:
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckEqualsInternalizedString:
case IrOpcode::kCheckEqualsSymbol:
@@ -147,7 +148,9 @@ bool CheckSubsumes(Node const* a, Node const* b) {
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
case IrOpcode::kCheckNumber:
+ case IrOpcode::kCheckBigInt:
break;
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index cebd87e73d..7a4577b799 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/type-cache.h"
@@ -25,12 +26,14 @@ const char* Truncation::description() const {
return "truncate-to-bool";
case TruncationKind::kWord32:
return "truncate-to-word32";
- case TruncationKind::kFloat64:
+ case TruncationKind::kWord64:
+ return "truncate-to-word64";
+ case TruncationKind::kOddballAndBigIntToNumber:
switch (identify_zeros()) {
case kIdentifyZeros:
- return "truncate-to-float64 (identify zeros)";
+ return "truncate-oddball&bigint-to-number (identify zeros)";
case kDistinguishZeros:
- return "truncate-to-float64 (distinguish zeros)";
+ return "truncate-oddball&bigint-to-number (distinguish zeros)";
}
case TruncationKind::kAny:
switch (identify_zeros()) {
@@ -45,22 +48,25 @@ const char* Truncation::description() const {
// Partial order for truncations:
//
-// kAny <-------+
-// ^ |
-// | |
-// kFloat64 |
-// ^ |
-// / |
-// kWord32 kBool
-// ^ ^
-// \ /
-// \ /
-// \ /
-// \ /
-// \ /
-// kNone
+// kAny <-------+
+// ^ |
+// | |
+// kOddballAndBigIntToNumber |
+// ^ |
+// / |
+// kWord64 |
+// ^ |
+// | |
+// kWord32 kBool
+// ^ ^
+// \ /
+// \ /
+// \ /
+// \ /
+// \ /
+// kNone
//
-// TODO(jarin) We might consider making kBool < kFloat64.
+// TODO(jarin) We might consider making kBool < kOddballAndBigIntToNumber.
// static
Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
@@ -68,9 +74,9 @@ Truncation::TruncationKind Truncation::Generalize(TruncationKind rep1,
if (LessGeneral(rep1, rep2)) return rep2;
if (LessGeneral(rep2, rep1)) return rep1;
// Handle the generalization of float64-representable values.
- if (LessGeneral(rep1, TruncationKind::kFloat64) &&
- LessGeneral(rep2, TruncationKind::kFloat64)) {
- return TruncationKind::kFloat64;
+ if (LessGeneral(rep1, TruncationKind::kOddballAndBigIntToNumber) &&
+ LessGeneral(rep2, TruncationKind::kOddballAndBigIntToNumber)) {
+ return TruncationKind::kOddballAndBigIntToNumber;
}
// Handle the generalization of any-representable values.
if (LessGeneral(rep1, TruncationKind::kAny) &&
@@ -101,9 +107,16 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
return rep2 == TruncationKind::kBool || rep2 == TruncationKind::kAny;
case TruncationKind::kWord32:
return rep2 == TruncationKind::kWord32 ||
- rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
- case TruncationKind::kFloat64:
- return rep2 == TruncationKind::kFloat64 || rep2 == TruncationKind::kAny;
+ rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kWord64:
+ return rep2 == TruncationKind::kWord64 ||
+ rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
+ case TruncationKind::kOddballAndBigIntToNumber:
+ return rep2 == TruncationKind::kOddballAndBigIntToNumber ||
+ rep2 == TruncationKind::kAny;
case TruncationKind::kAny:
return rep2 == TruncationKind::kAny;
}
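
The diagram and the Generalize/LessGeneral pair above define a join on the truncation lattice: Generalize returns the least kind that is at least as general as both inputs, and the new kWord64 kind sits between kWord32 and kOddballAndBigIntToNumber. A self-contained sketch of the same lattice logic, mirroring the diagram but independent of the V8 types:

#include <cassert>

enum class Kind { kNone, kBool, kWord32, kWord64, kOddballAndBigIntToNumber, kAny };

// True if a is less general than (or equal to) b in the diagram above.
bool LessGeneral(Kind a, Kind b) {
  if (a == b || b == Kind::kAny) return true;
  switch (a) {
    case Kind::kNone:
      return true;
    case Kind::kWord32:
      return b == Kind::kWord64 || b == Kind::kOddballAndBigIntToNumber;
    case Kind::kWord64:
      return b == Kind::kOddballAndBigIntToNumber;
    default:
      return false;
  }
}

// Least upper bound of two kinds.
Kind Generalize(Kind a, Kind b) {
  if (LessGeneral(a, b)) return b;
  if (LessGeneral(b, a)) return a;
  if (LessGeneral(a, Kind::kOddballAndBigIntToNumber) &&
      LessGeneral(b, Kind::kOddballAndBigIntToNumber)) {
    return Kind::kOddballAndBigIntToNumber;
  }
  return Kind::kAny;
}

int main() {
  assert(Generalize(Kind::kWord32, Kind::kWord64) == Kind::kWord64);
  // kWord32 and kBool only meet at the top of the diagram.
  assert(Generalize(Kind::kWord32, Kind::kBool) == Kind::kAny);
}
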
@@ -125,10 +138,11 @@ bool IsWord(MachineRepresentation rep) {
} // namespace
-RepresentationChanger::RepresentationChanger(JSGraph* jsgraph, Isolate* isolate)
+RepresentationChanger::RepresentationChanger(JSGraph* jsgraph,
+ JSHeapBroker* broker)
: cache_(TypeCache::Get()),
jsgraph_(jsgraph),
- isolate_(isolate),
+ broker_(broker),
testing_type_errors_(false),
type_error_(false) {}
@@ -169,7 +183,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_node, use_info);
case MachineRepresentation::kTaggedPointer:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kHeapObject);
+ use_info.type_check() == TypeCheckKind::kHeapObject ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetTaggedPointerRepresentationFor(node, output_rep, output_type,
use_node, use_info);
case MachineRepresentation::kTagged:
@@ -207,7 +222,8 @@ Node* RepresentationChanger::GetRepresentationFor(
use_info);
case MachineRepresentation::kWord64:
DCHECK(use_info.type_check() == TypeCheckKind::kNone ||
- use_info.type_check() == TypeCheckKind::kSigned64);
+ use_info.type_check() == TypeCheckKind::kSigned64 ||
+ use_info.type_check() == TypeCheckKind::kBigInt);
return GetWord64RepresentationFor(node, output_rep, output_type, use_node,
use_info);
case MachineRepresentation::kSimd128:
@@ -418,6 +434,8 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
op = machine()->ChangeInt64ToFloat64();
node = jsgraph()->graph()->NewNode(op, node);
op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else if (output_type.Is(Type::BigInt())) {
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -447,16 +465,37 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
+ } else if (IsAnyTagged(output_rep) &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ if (output_type.Is(Type::BigInt())) {
+ return node;
+ }
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ if (use_info.type_check() == TypeCheckKind::kBigInt &&
+ !output_type.Is(Type::BigInt())) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
+ } else {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ }
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ output_type.Is(Type::BigInt())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed &&
+ use_info.type_check() == TypeCheckKind::kBigInt) {
+ node = InsertChangeCompressedToTagged(node);
+ op = simplified()->CheckBigInt(use_info.feedback());
} else if (CanBeCompressedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for CompressedSigned output representation.
+ op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for CompressedSigned output representation.
- op = simplified()->CheckedCompressedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -535,6 +574,9 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
} else if (output_type.Is(cache_->kSafeInteger)) {
// int64 -> tagged
op = simplified()->ChangeInt64ToTagged();
+ } else if (output_type.Is(Type::BigInt())) {
+ // uint64 -> BigInt
+ op = simplified()->ChangeUint64ToBigInt();
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
@@ -560,7 +602,7 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
op = simplified()->ChangeUint32ToTagged();
} else if (output_type.Is(Type::Number()) ||
(output_type.Is(Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64())) {
+ truncation.TruncatesOddballAndBigIntToNumber())) {
op = simplified()->ChangeFloat64ToTagged(
output_type.Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
@@ -569,7 +611,11 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTagged);
}
- } else if (IsAnyCompressed(output_rep)) {
+ } else if (output_rep == MachineRepresentation::kCompressedSigned) {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ } else if (output_rep == MachineRepresentation::kCompressedPointer) {
+ op = machine()->ChangeCompressedPointerToTaggedPointer();
+ } else if (output_rep == MachineRepresentation::kCompressed) {
op = machine()->ChangeCompressedToTagged();
} else {
return TypeError(node, output_rep, output_type,
@@ -606,9 +652,20 @@ Node* RepresentationChanger::GetCompressedSignedRepresentationFor(
use_node, use_info);
op = machine()->ChangeTaggedSignedToCompressedSigned();
} else if (IsWord(output_rep)) {
- node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
- use_node, use_info);
- op = machine()->ChangeTaggedSignedToCompressedSigned();
+ if (output_type.Is(Type::Signed31())) {
+ op = simplified()->ChangeInt31ToCompressedSigned();
+ } else if (output_type.Is(Type::Signed32())) {
+ if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+ op = simplified()->CheckedInt32ToCompressedSigned(use_info.feedback());
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kCompressedSigned);
+ }
+ } else {
+ node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = machine()->ChangeTaggedSignedToCompressedSigned();
+ }
} else if (output_rep == MachineRepresentation::kWord64) {
node = GetTaggedSignedRepresentationFor(node, output_rep, output_type,
use_node, use_info);
@@ -645,10 +702,11 @@ Node* RepresentationChanger::GetCompressedPointerRepresentationFor(
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type.Maybe(Type::SignedSmall())) {
op = machine()->ChangeTaggedPointerToCompressedPointer();
+ } else {
+ // TODO(turbofan): Consider adding a Bailout operator that just deopts
+ // for TaggedSigned output representation.
+ op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
}
- // TODO(turbofan): Consider adding a Bailout operator that just deopts
- // for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToCompressedPointer(use_info.feedback());
} else if (output_rep == MachineRepresentation::kBit) {
// TODO(v8:8977): specialize here and below
node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
@@ -810,11 +868,14 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
Node* use_node, UseInfo use_info) {
NumberMatcher m(node);
if (m.HasValue()) {
+ // BigInts are not used as number constants.
+ DCHECK(use_info.type_check() != TypeCheckKind::kBigInt);
switch (use_info.type_check()) {
case TypeCheckKind::kNone:
case TypeCheckKind::kNumber:
case TypeCheckKind::kNumberOrOddball:
return jsgraph()->Float64Constant(m.Value());
+ case TypeCheckKind::kBigInt:
case TypeCheckKind::kHeapObject:
case TypeCheckKind::kSigned32:
case TypeCheckKind::kSigned64:
@@ -843,9 +904,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kBit) {
CHECK(output_type.Is(Type::Boolean()));
- // TODO(tebbi): TypeCheckKind::kNumberOrOddball should imply Float64
- // truncation, since this exactly means that we treat Oddballs as Numbers.
- if (use_info.truncation().IsUsedAsFloat64() ||
+ if (use_info.truncation().TruncatesOddballAndBigIntToNumber() ||
use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -867,7 +926,7 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
} else if (output_type.Is(Type::Number())) {
op = simplified()->ChangeTaggedToFloat64();
} else if ((output_type.Is(Type::NumberOrOddball()) &&
- use_info.truncation().IsUsedAsFloat64()) ||
+ use_info.truncation().TruncatesOddballAndBigIntToNumber()) ||
output_type.Is(Type::NumberOrHole())) {
// JavaScript 'null' is an Oddball that results in +0 when truncated to
// Number. In a context like -0 == null, which must evaluate to false,
@@ -1063,11 +1122,15 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
output_type, use_node, use_info);
} else if (output_rep == MachineRepresentation::kCompressedSigned) {
// TODO(v8:8977): Specialise here
- op = machine()->ChangeCompressedSignedToTaggedSigned();
- node = jsgraph()->graph()->NewNode(op, node);
- return GetWord32RepresentationFor(node,
- MachineRepresentation::kTaggedSigned,
- output_type, use_node, use_info);
+ if (output_type.Is(Type::SignedSmall())) {
+ op = simplified()->ChangeCompressedSignedToInt32();
+ } else {
+ op = machine()->ChangeCompressedSignedToTaggedSigned();
+ node = jsgraph()->graph()->NewNode(op, node);
+ return GetWord32RepresentationFor(node,
+ MachineRepresentation::kTaggedSigned,
+ output_type, use_node, use_info);
+ }
} else if (output_rep == MachineRepresentation::kCompressedPointer) {
// TODO(v8:8977): Specialise here
op = machine()->ChangeCompressedPointerToTaggedPointer();
@@ -1252,6 +1315,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
}
break;
}
+ case IrOpcode::kHeapConstant: {
+ HeapObjectMatcher m(node);
+ if (m.HasValue() && m.Ref(broker_).IsBigInt()) {
+ auto bigint = m.Ref(broker_).AsBigInt();
+ return jsgraph()->Int64Constant(
+ static_cast<int64_t>(bigint.AsUint64()));
+ }
+ break;
+ }
default:
break;
}
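
The new kHeapConstant case above folds a BigInt heap constant directly into a 64-bit machine constant: the BigInt's low 64 bits are taken via AsUint64() and reinterpreted as a signed word. A small sketch of that reinterpretation, with a plain integer standing in for the BigInt object:

#include <cassert>
#include <cstdint>

// Reinterpret the low 64 bits of a BigInt as a signed machine word
// (two's complement), as the Int64Constant folding above does.
int64_t AsWord64(uint64_t bigint_bits) {
  return static_cast<int64_t>(bigint_bits);
}

int main() {
  assert(AsWord64(42u) == 42);
  // All-ones bits read back as -1 when interpreted as a signed word.
  assert(AsWord64(0xFFFFFFFFFFFFFFFFull) == -1);
}
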
@@ -1272,9 +1344,15 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
} else if (IsWord(output_rep)) {
- if (output_type.Is(Type::Unsigned32())) {
+ if (output_type.Is(Type::Unsigned32OrMinusZero())) {
+ // uint32 -> uint64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeUint32ToUint64();
- } else if (output_type.Is(Type::Signed32())) {
+ } else if (output_type.Is(Type::Signed32OrMinusZero())) {
+ // int32 -> int64
+ CHECK_IMPLIES(output_type.Maybe(Type::MinusZero()),
+ use_info.truncation().IdentifiesZeroAndMinusZero());
op = machine()->ChangeInt32ToInt64();
} else {
return TypeError(node, output_rep, output_type,
@@ -1323,6 +1401,13 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord64);
}
+ } else if (IsAnyTagged(output_rep) &&
+ use_info.truncation().IsUsedAsWord64() &&
+ (use_info.type_check() == TypeCheckKind::kBigInt ||
+ output_type.Is(Type::BigInt()))) {
+ node = GetTaggedPointerRepresentationFor(node, output_rep, output_type,
+ use_node, use_info);
+ op = simplified()->TruncateBigIntToUint64();
} else if (CanBeTaggedPointer(output_rep)) {
if (output_type.Is(cache_->kInt64)) {
op = simplified()->ChangeTaggedToInt64();
@@ -1656,6 +1741,13 @@ Node* RepresentationChanger::InsertTruncateInt64ToInt32(Node* node) {
return jsgraph()->graph()->NewNode(machine()->TruncateInt64ToInt32(), node);
}
+Node* RepresentationChanger::InsertChangeCompressedToTagged(Node* node) {
+ return jsgraph()->graph()->NewNode(machine()->ChangeCompressedToTagged(),
+ node);
+}
+
+Isolate* RepresentationChanger::isolate() const { return broker_->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index e8bb3f12ac..d338667603 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -29,8 +29,13 @@ class Truncation final {
static Truncation Word32() {
return Truncation(TruncationKind::kWord32, kIdentifyZeros);
}
- static Truncation Float64(IdentifyZeros identify_zeros = kDistinguishZeros) {
- return Truncation(TruncationKind::kFloat64, identify_zeros);
+ static Truncation Word64() {
+ return Truncation(TruncationKind::kWord64, kIdentifyZeros);
+ }
+ static Truncation OddballAndBigIntToNumber(
+ IdentifyZeros identify_zeros = kDistinguishZeros) {
+ return Truncation(TruncationKind::kOddballAndBigIntToNumber,
+ identify_zeros);
}
static Truncation Any(IdentifyZeros identify_zeros = kDistinguishZeros) {
return Truncation(TruncationKind::kAny, identify_zeros);
@@ -50,8 +55,11 @@ class Truncation final {
bool IsUsedAsWord32() const {
return LessGeneral(kind_, TruncationKind::kWord32);
}
- bool IsUsedAsFloat64() const {
- return LessGeneral(kind_, TruncationKind::kFloat64);
+ bool IsUsedAsWord64() const {
+ return LessGeneral(kind_, TruncationKind::kWord64);
+ }
+ bool TruncatesOddballAndBigIntToNumber() const {
+ return LessGeneral(kind_, TruncationKind::kOddballAndBigIntToNumber);
}
bool IdentifiesUndefinedAndZero() {
return LessGeneral(kind_, TruncationKind::kWord32) ||
@@ -81,13 +89,15 @@ class Truncation final {
kNone,
kBool,
kWord32,
- kFloat64,
+ kWord64,
+ kOddballAndBigIntToNumber,
kAny
};
explicit Truncation(TruncationKind kind, IdentifyZeros identify_zeros)
: kind_(kind), identify_zeros_(identify_zeros) {
- DCHECK(kind == TruncationKind::kAny || kind == TruncationKind::kFloat64 ||
+ DCHECK(kind == TruncationKind::kAny ||
+ kind == TruncationKind::kOddballAndBigIntToNumber ||
identify_zeros == kIdentifyZeros);
}
TruncationKind kind() const { return kind_; }
@@ -109,7 +119,8 @@ enum class TypeCheckKind : uint8_t {
kSigned64,
kNumber,
kNumberOrOddball,
- kHeapObject
+ kHeapObject,
+ kBigInt,
};
inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
@@ -128,6 +139,8 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
return os << "NumberOrOddball";
case TypeCheckKind::kHeapObject:
return os << "HeapObject";
+ case TypeCheckKind::kBigInt:
+ return os << "BigInt";
}
UNREACHABLE();
}
@@ -160,6 +173,13 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
+ static UseInfo TruncatingWord64() {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
+ }
+ static UseInfo CheckedBigIntTruncatingWord64(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(),
+ TypeCheckKind::kBigInt, feedback);
+ }
static UseInfo Word64() {
return UseInfo(MachineRepresentation::kWord64, Truncation::Any());
}
@@ -175,7 +195,7 @@ class UseInfo {
static UseInfo TruncatingFloat64(
IdentifyZeros identify_zeros = kDistinguishZeros) {
return UseInfo(MachineRepresentation::kFloat64,
- Truncation::Float64(identify_zeros));
+ Truncation::OddballAndBigIntToNumber(identify_zeros));
}
static UseInfo AnyTagged() {
return UseInfo(MachineRepresentation::kTagged, Truncation::Any());
@@ -203,6 +223,12 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject, feedback);
}
+
+ static UseInfo CheckedBigIntAsTaggedPointer(const VectorSlotPair& feedback) {
+ return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
+ TypeCheckKind::kBigInt, feedback);
+ }
+
static UseInfo CheckedSignedSmallAsTaggedSigned(
const VectorSlotPair& feedback,
IdentifyZeros identify_zeros = kDistinguishZeros) {
@@ -240,8 +266,6 @@ class UseInfo {
}
static UseInfo CheckedNumberOrOddballAsFloat64(
IdentifyZeros identify_zeros, const VectorSlotPair& feedback) {
- // TODO(tebbi): We should use Float64 truncation here, since this exactly
- // means that we treat Oddballs as Numbers.
return UseInfo(MachineRepresentation::kFloat64,
Truncation::Any(identify_zeros),
TypeCheckKind::kNumberOrOddball, feedback);
@@ -287,7 +311,7 @@ class UseInfo {
// Eagerly folds any representation changes for constants.
class V8_EXPORT_PRIVATE RepresentationChanger final {
public:
- RepresentationChanger(JSGraph* jsgraph, Isolate* isolate);
+ RepresentationChanger(JSGraph* jsgraph, JSHeapBroker* broker);
// Changes representation from {output_type} to {use_rep}. The {truncation}
// parameter is only used for sanity checking - if the changer cannot figure
@@ -317,7 +341,7 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
private:
TypeCache const* cache_;
JSGraph* jsgraph_;
- Isolate* isolate_;
+ JSHeapBroker* broker_;
friend class RepresentationChangerTester; // accesses the below fields.
@@ -371,12 +395,13 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* InsertChangeTaggedSignedToInt32(Node* node);
Node* InsertChangeTaggedToFloat64(Node* node);
Node* InsertChangeUint32ToFloat64(Node* node);
+ Node* InsertChangeCompressedToTagged(Node* node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
Node* InsertTruncateInt64ToInt32(Node* node);
Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
JSGraph* jsgraph() const { return jsgraph_; }
- Isolate* isolate() const { return isolate_; }
+ Isolate* isolate() const;
Factory* factory() const { return isolate()->factory(); }
SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index b57162f7f5..25919bb3b3 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/adapters.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
@@ -26,7 +27,7 @@ namespace compiler {
} while (false)
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint)
+ size_t node_count_hint, TickCounter* tick_counter)
: zone_(zone),
graph_(graph),
schedule_(schedule),
@@ -34,12 +35,14 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
- node_data_(zone) {
+ node_data_(zone),
+ tick_counter_(tick_counter) {
node_data_.reserve(node_count_hint);
node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
}
-Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter) {
Zone* schedule_zone =
(flags & Scheduler::kTempSchedule) ? zone : graph->zone();
@@ -50,7 +53,8 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
Schedule* schedule =
new (schedule_zone) Schedule(schedule_zone, node_count_hint);
- Scheduler scheduler(zone, graph, schedule, flags, node_count_hint);
+ Scheduler scheduler(zone, graph, schedule, flags, node_count_hint,
+ tick_counter);
scheduler.BuildCFG();
scheduler.ComputeSpecialRPONumbering();
@@ -65,7 +69,6 @@ Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
return schedule;
}
-
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
SchedulerData def = {schedule_->start(), 0, kUnknown};
return def;
@@ -258,6 +261,7 @@ class CFGBuilder : public ZoneObject {
Queue(scheduler_->graph_->end());
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
int max = NodeProperties::PastControlIndex(node);
@@ -283,6 +287,7 @@ class CFGBuilder : public ZoneObject {
component_end_ = schedule_->block(exit);
scheduler_->equivalence_->Run(exit);
while (!queue_.empty()) { // Breadth-first backwards traversal.
+ scheduler_->tick_counter_->DoTick();
Node* node = queue_.front();
queue_.pop();
@@ -728,11 +733,10 @@ class SpecialRPONumberer : public ZoneObject {
}
};
- int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
- BasicBlock* child, int unvisited) {
+ int Push(int depth, BasicBlock* child, int unvisited) {
if (child->rpo_number() == unvisited) {
- stack[depth].block = child;
- stack[depth].index = 0;
+ stack_[depth].block = child;
+ stack_[depth].index = 0;
child->set_rpo_number(kBlockOnStack);
return depth + 1;
}
@@ -780,7 +784,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
previous_block_count_ = schedule_->BasicBlockCount();
- int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
+ int stack_depth = Push(0, entry, kBlockUnvisited1);
int num_loops = static_cast<int>(loops_.size());
while (stack_depth > 0) {
@@ -802,7 +806,7 @@ class SpecialRPONumberer : public ZoneObject {
} else {
// Push the successor onto the stack.
DCHECK_EQ(kBlockUnvisited1, succ->rpo_number());
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited1);
}
} else {
// Finished with all successors; pop the stack and add the block.
@@ -827,7 +831,7 @@ class SpecialRPONumberer : public ZoneObject {
// edges that lead out of loops. Visits each block once, but linking loop
// sections together is linear in the loop size, so overall is
// O(|B| + max(loop_depth) * max(|loop|))
- stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
+ stack_depth = Push(0, entry, kBlockUnvisited2);
while (stack_depth > 0) {
SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
BasicBlock* block = frame->block;
@@ -874,7 +878,7 @@ class SpecialRPONumberer : public ZoneObject {
loop->AddOutgoing(zone_, succ);
} else {
// Push the successor onto the stack.
- stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
+ stack_depth = Push(stack_depth, succ, kBlockUnvisited2);
if (HasLoopNumber(succ)) {
// Push the inner loop onto the loop stack.
DCHECK(GetLoopNumber(succ) < num_loops);
@@ -958,8 +962,9 @@ class SpecialRPONumberer : public ZoneObject {
}
// Computes loop membership from the backedges of the control flow graph.
- void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
- size_t num_loops, ZoneVector<Backedge>* backedges) {
+ void ComputeLoopInfo(
+ ZoneVector<SpecialRPOStackFrame>& queue, // NOLINT(runtime/references)
+ size_t num_loops, ZoneVector<Backedge>* backedges) {
// Extend existing loop membership vectors.
for (LoopInfo& loop : loops_) {
loop.members->Resize(static_cast<int>(schedule_->BasicBlockCount()),
@@ -1234,6 +1239,7 @@ void Scheduler::PrepareUses() {
visited[node->id()] = true;
stack.push(node->input_edges().begin());
while (!stack.empty()) {
+ tick_counter_->DoTick();
Edge edge = *stack.top();
Node* node = edge.to();
if (visited[node->id()]) {
@@ -1262,6 +1268,7 @@ class ScheduleEarlyNodeVisitor {
for (Node* const root : *roots) {
queue_.push(root);
while (!queue_.empty()) {
+ scheduler_->tick_counter_->DoTick();
VisitNode(queue_.front());
queue_.pop();
}
@@ -1388,6 +1395,7 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
+ scheduler_->tick_counter_->DoTick();
Node* const node = queue->front();
queue->pop();
VisitNode(node);
diff --git a/deps/v8/src/compiler/scheduler.h b/deps/v8/src/compiler/scheduler.h
index bd2f2780dd..3d1fa40025 100644
--- a/deps/v8/src/compiler/scheduler.h
+++ b/deps/v8/src/compiler/scheduler.h
@@ -15,6 +15,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -23,7 +26,6 @@ class ControlEquivalence;
class Graph;
class SpecialRPONumberer;
-
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class V8_EXPORT_PRIVATE Scheduler {
@@ -34,7 +36,8 @@ class V8_EXPORT_PRIVATE Scheduler {
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
- static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags);
+ static Schedule* ComputeSchedule(Zone* temp_zone, Graph* graph, Flags flags,
+ TickCounter* tick_counter);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
@@ -78,9 +81,10 @@ class V8_EXPORT_PRIVATE Scheduler {
CFGBuilder* control_flow_builder_; // Builds basic blocks for controls.
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
+ TickCounter* const tick_counter_;
Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
- size_t node_count_hint_);
+ size_t node_count_hint_, TickCounter* tick_counter);
inline SchedulerData DefaultSchedulerData();
inline SchedulerData* GetData(Node* node);
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index ecbd9cc030..5597850b06 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -6,30 +6,495 @@
#include <sstream>
+#include "src/base/optional.h"
+#include "src/compiler/access-info.h"
+#include "src/compiler/bytecode-analysis.h"
+#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/vector-slot-pair.h"
#include "src/handles/handles-inl.h"
+#include "src/ic/call-optimization.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/objects/code.h"
+#include "src/objects/js-array-inl.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
namespace compiler {
+#define CLEAR_ENVIRONMENT_LIST(V) \
+ V(CallRuntimeForPair) \
+ V(Debugger) \
+ V(ResumeGenerator) \
+ V(SuspendGenerator)
+
+#define KILL_ENVIRONMENT_LIST(V) \
+ V(Abort) \
+ V(ReThrow) \
+ V(Throw)
+
+#define CLEAR_ACCUMULATOR_LIST(V) \
+ V(Add) \
+ V(AddSmi) \
+ V(BitwiseAnd) \
+ V(BitwiseAndSmi) \
+ V(BitwiseNot) \
+ V(BitwiseOr) \
+ V(BitwiseOrSmi) \
+ V(BitwiseXor) \
+ V(BitwiseXorSmi) \
+ V(CallRuntime) \
+ V(CloneObject) \
+ V(CreateArrayFromIterable) \
+ V(CreateArrayLiteral) \
+ V(CreateEmptyArrayLiteral) \
+ V(CreateEmptyObjectLiteral) \
+ V(CreateMappedArguments) \
+ V(CreateObjectLiteral) \
+ V(CreateRegExpLiteral) \
+ V(CreateRestParameter) \
+ V(CreateUnmappedArguments) \
+ V(Dec) \
+ V(DeletePropertySloppy) \
+ V(DeletePropertyStrict) \
+ V(Div) \
+ V(DivSmi) \
+ V(Exp) \
+ V(ExpSmi) \
+ V(ForInContinue) \
+ V(ForInEnumerate) \
+ V(ForInNext) \
+ V(ForInStep) \
+ V(Inc) \
+ V(LdaLookupSlot) \
+ V(LdaLookupSlotInsideTypeof) \
+ V(LogicalNot) \
+ V(Mod) \
+ V(ModSmi) \
+ V(Mul) \
+ V(MulSmi) \
+ V(Negate) \
+ V(SetPendingMessage) \
+ V(ShiftLeft) \
+ V(ShiftLeftSmi) \
+ V(ShiftRight) \
+ V(ShiftRightLogical) \
+ V(ShiftRightLogicalSmi) \
+ V(ShiftRightSmi) \
+ V(StaLookupSlot) \
+ V(Sub) \
+ V(SubSmi) \
+ V(TestEqual) \
+ V(TestEqualStrict) \
+ V(TestGreaterThan) \
+ V(TestGreaterThanOrEqual) \
+ V(TestInstanceOf) \
+ V(TestLessThan) \
+ V(TestLessThanOrEqual) \
+ V(TestNull) \
+ V(TestReferenceEqual) \
+ V(TestTypeOf) \
+ V(TestUndefined) \
+ V(TestUndetectable) \
+ V(ToBooleanLogicalNot) \
+ V(ToName) \
+ V(ToNumber) \
+ V(ToNumeric) \
+ V(ToString) \
+ V(TypeOf)
+
+#define UNCONDITIONAL_JUMPS_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ V(JumpLoop)
+
+#define CONDITIONAL_JUMPS_LIST(V) \
+ V(JumpIfFalse) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotNull) \
+ V(JumpIfNotNullConstant) \
+ V(JumpIfNotUndefined) \
+ V(JumpIfNotUndefinedConstant) \
+ V(JumpIfNull) \
+ V(JumpIfNullConstant) \
+ V(JumpIfToBooleanFalse) \
+ V(JumpIfToBooleanFalseConstant) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfTrue) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfUndefined) \
+ V(JumpIfUndefinedConstant)
+
+#define IGNORED_BYTECODE_LIST(V) \
+ V(CallNoFeedback) \
+ V(IncBlockCounter) \
+ V(LdaNamedPropertyNoFeedback) \
+ V(StackCheck) \
+ V(StaNamedPropertyNoFeedback) \
+ V(ThrowReferenceErrorIfHole) \
+ V(ThrowSuperAlreadyCalledIfNotHole) \
+ V(ThrowSuperNotCalledIfHole)
+
+#define UNREACHABLE_BYTECODE_LIST(V) \
+ V(ExtraWide) \
+ V(Illegal) \
+ V(Wide)
+
+#define SUPPORTED_BYTECODE_LIST(V) \
+ V(CallAnyReceiver) \
+ V(CallJSRuntime) \
+ V(CallProperty) \
+ V(CallProperty0) \
+ V(CallProperty1) \
+ V(CallProperty2) \
+ V(CallUndefinedReceiver) \
+ V(CallUndefinedReceiver0) \
+ V(CallUndefinedReceiver1) \
+ V(CallUndefinedReceiver2) \
+ V(CallWithSpread) \
+ V(Construct) \
+ V(ConstructWithSpread) \
+ V(CreateBlockContext) \
+ V(CreateCatchContext) \
+ V(CreateClosure) \
+ V(CreateEvalContext) \
+ V(CreateFunctionContext) \
+ V(CreateWithContext) \
+ V(GetSuperConstructor) \
+ V(GetTemplateObject) \
+ V(InvokeIntrinsic) \
+ V(LdaConstant) \
+ V(LdaContextSlot) \
+ V(LdaCurrentContextSlot) \
+ V(LdaImmutableContextSlot) \
+ V(LdaImmutableCurrentContextSlot) \
+ V(LdaModuleVariable) \
+ V(LdaFalse) \
+ V(LdaGlobal) \
+ V(LdaGlobalInsideTypeof) \
+ V(LdaKeyedProperty) \
+ V(LdaLookupContextSlot) \
+ V(LdaLookupContextSlotInsideTypeof) \
+ V(LdaLookupGlobalSlot) \
+ V(LdaLookupGlobalSlotInsideTypeof) \
+ V(LdaNamedProperty) \
+ V(LdaNull) \
+ V(Ldar) \
+ V(LdaSmi) \
+ V(LdaTheHole) \
+ V(LdaTrue) \
+ V(LdaUndefined) \
+ V(LdaZero) \
+ V(Mov) \
+ V(PopContext) \
+ V(PushContext) \
+ V(Return) \
+ V(StaContextSlot) \
+ V(StaCurrentContextSlot) \
+ V(StaGlobal) \
+ V(StaInArrayLiteral) \
+ V(StaKeyedProperty) \
+ V(StaModuleVariable) \
+ V(StaNamedOwnProperty) \
+ V(StaNamedProperty) \
+ V(Star) \
+ V(SwitchOnGeneratorState) \
+ V(SwitchOnSmiNoFeedback) \
+ V(TestIn) \
+ CLEAR_ACCUMULATOR_LIST(V) \
+ CLEAR_ENVIRONMENT_LIST(V) \
+ CONDITIONAL_JUMPS_LIST(V) \
+ IGNORED_BYTECODE_LIST(V) \
+ KILL_ENVIRONMENT_LIST(V) \
+ UNCONDITIONAL_JUMPS_LIST(V) \
+ UNREACHABLE_BYTECODE_LIST(V)
+
+template <typename T>
+struct HandleComparator {
+ bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
+ return lhs.address() < rhs.address();
+ }
+};
+
+struct VirtualContext {
+ unsigned int distance;
+ Handle<Context> context;
+
+ VirtualContext(unsigned int distance_in, Handle<Context> context_in)
+ : distance(distance_in), context(context_in) {
+ CHECK_GT(distance, 0);
+ }
+ bool operator<(const VirtualContext& other) const {
+ return HandleComparator<Context>()(context, other.context) &&
+ distance < other.distance;
+ }
+};
+
+class FunctionBlueprint;
+using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
+using VirtualContextsSet = ZoneSet<VirtualContext>;
+using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
+using BlueprintsSet = ZoneSet<FunctionBlueprint>;
+
+class Hints {
+ public:
+ explicit Hints(Zone* zone);
+
+ const ConstantsSet& constants() const;
+ const MapsSet& maps() const;
+ const BlueprintsSet& function_blueprints() const;
+ const VirtualContextsSet& virtual_contexts() const;
+
+ void AddConstant(Handle<Object> constant);
+ void AddMap(Handle<Map> map);
+ void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
+ void AddVirtualContext(VirtualContext virtual_context);
+
+ void Add(const Hints& other);
+
+ void Clear();
+ bool IsEmpty() const;
+
+#ifdef ENABLE_SLOW_DCHECKS
+ bool Includes(Hints const& other) const;
+ bool Equals(Hints const& other) const;
+#endif
+
+ private:
+ VirtualContextsSet virtual_contexts_;
+ ConstantsSet constants_;
+ MapsSet maps_;
+ BlueprintsSet function_blueprints_;
+};
+
+using HintsVector = ZoneVector<Hints>;
+
+class FunctionBlueprint {
+ public:
+ FunctionBlueprint(Handle<JSFunction> function, Isolate* isolate, Zone* zone);
+
+ FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints);
+
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
+ Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
+ const Hints& context_hints() const { return context_hints_; }
+
+ bool operator<(const FunctionBlueprint& other) const {
+ // A feedback vector is never used for more than one SFI, so it can
+ // be used for strict ordering of blueprints.
+ DCHECK_IMPLIES(feedback_vector_.equals(other.feedback_vector_),
+ shared_.equals(other.shared_));
+ return HandleComparator<FeedbackVector>()(feedback_vector_,
+ other.feedback_vector_);
+ }
+
+ private:
+ Handle<SharedFunctionInfo> shared_;
+ Handle<FeedbackVector> feedback_vector_;
+ Hints context_hints_;
+};
+
+class CompilationSubject {
+ public:
+ explicit CompilationSubject(FunctionBlueprint blueprint)
+ : blueprint_(blueprint) {}
+
+ // The zone parameter is to correctly initialize the blueprint,
+ // which contains zone-allocated context information.
+ CompilationSubject(Handle<JSFunction> closure, Isolate* isolate, Zone* zone);
+
+ const FunctionBlueprint& blueprint() const { return blueprint_; }
+ MaybeHandle<JSFunction> closure() const { return closure_; }
+
+ private:
+ FunctionBlueprint blueprint_;
+ MaybeHandle<JSFunction> closure_;
+};
+
+// The SerializerForBackgroundCompilation makes sure that the relevant function
+// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
+// optimizations in the compiler, is copied to the heap broker.
+class SerializerForBackgroundCompilation {
+ public:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
+ Hints Run(); // NOTE: Returns empty for an already-serialized function.
+
+ class Environment;
+
+ private:
+ SerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ CompilationSubject function, base::Optional<Hints> new_target,
+ const HintsVector& arguments,
+ SerializerForBackgroundCompilationFlags flags);
+
+ bool BailoutOnUninitialized(FeedbackSlot slot);
+
+ void TraverseBytecode();
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(interpreter::BytecodeArrayIterator* iterator);
+ SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
+ const HintsVector& arguments, FeedbackSlot slot,
+ bool with_spread = false);
+ void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
+ ConvertReceiverMode receiver_mode,
+ bool with_spread = false);
+ void ProcessApiCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+ void ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, // NOLINT(runtime/references)
+ Handle<Map> receiver);
+ void ProcessBuiltinCall(Handle<SharedFunctionInfo> target,
+ const HintsVector& arguments);
+
+ void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
+
+ void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
+ AccessMode mode);
+ void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
+ FeedbackSlot slot, AccessMode mode);
+ void ProcessMapHintsForPromises(Hints const& receiver_hints);
+ void ProcessHintsForPromiseResolve(Hints const& resolution_hints);
+ void ProcessHintsForRegExpTest(Hints const& regexp_hints);
+ PropertyAccessInfo ProcessMapForRegExpTest(MapRef map);
+ void ProcessHintsForFunctionCall(Hints const& target_hints);
+
+ GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
+ NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
+ const MapHandles& maps, AccessMode mode, NameRef const& name);
+ ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode);
+ void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
+ base::Optional<NameRef> static_name);
+ void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
+
+ void ProcessCreateContext();
+ enum ContextProcessingMode {
+ kIgnoreSlot,
+ kSerializeSlot,
+ kSerializeSlotAndAddToAccumulator
+ };
+
+ void ProcessContextAccess(const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode);
+ void ProcessImmutableLoad(ContextRef& context, // NOLINT(runtime/references)
+ int slot, ContextProcessingMode mode);
+ void ProcessLdaLookupGlobalSlot(interpreter::BytecodeArrayIterator* iterator);
+ void ProcessLdaLookupContextSlot(
+ interpreter::BytecodeArrayIterator* iterator);
+
+ // Performs extension lookups for [0, depth) like
+ // BytecodeGraphBuilder::CheckContextExtensions().
+ void ProcessCheckContextExtensions(int depth);
+
+ Hints RunChildSerializer(CompilationSubject function,
+ base::Optional<Hints> new_target,
+ const HintsVector& arguments, bool with_spread);
+
+ // When (forward-)branching bytecodes are encountered, e.g. a conditional
+ // jump, we call ContributeToJumpTargetEnvironment to "remember" the current
+ // environment, associated with the jump target offset. When serialization
+ // eventually reaches that offset, we call IncorporateJumpTargetEnvironment to
+ // merge that environment back into whatever is the current environment then.
+ // Note: Since there may be multiple jumps to the same target,
+ // ContributeToJumpTargetEnvironment may actually do a merge as well.
+ void ContributeToJumpTargetEnvironment(int target_offset);
+ void IncorporateJumpTargetEnvironment(int target_offset);
+
+ Handle<BytecodeArray> bytecode_array() const;
+ BytecodeAnalysis const& GetBytecodeAnalysis(bool serialize);
+
+ JSHeapBroker* broker() const { return broker_; }
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+ Environment* environment() const { return environment_; }
+ SerializerForBackgroundCompilationFlags flags() const { return flags_; }
+ BailoutId osr_offset() const { return osr_offset_; }
+
+ JSHeapBroker* const broker_;
+ CompilationDependencies* const dependencies_;
+ Zone* const zone_;
+ Environment* const environment_;
+ ZoneUnorderedMap<int, Environment*> jump_target_environments_;
+ SerializerForBackgroundCompilationFlags const flags_;
+ BailoutId const osr_offset_;
+};
+
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset) {
+ SerializerForBackgroundCompilation serializer(broker, dependencies, zone,
+ closure, flags, osr_offset);
+ serializer.Run();
+}
+
using BytecodeArrayIterator = interpreter::BytecodeArrayIterator;
+FunctionBlueprint::FunctionBlueprint(Handle<SharedFunctionInfo> shared,
+ Handle<FeedbackVector> feedback_vector,
+ const Hints& context_hints)
+ : shared_(shared),
+ feedback_vector_(feedback_vector),
+ context_hints_(context_hints) {}
+
+FunctionBlueprint::FunctionBlueprint(Handle<JSFunction> function,
+ Isolate* isolate, Zone* zone)
+ : shared_(handle(function->shared(), isolate)),
+ feedback_vector_(handle(function->feedback_vector(), isolate)),
+ context_hints_(zone) {
+ context_hints_.AddConstant(handle(function->context(), isolate));
+}
+
CompilationSubject::CompilationSubject(Handle<JSFunction> closure,
- Isolate* isolate)
- : blueprint_{handle(closure->shared(), isolate),
- handle(closure->feedback_vector(), isolate)},
- closure_(closure) {
+ Isolate* isolate, Zone* zone)
+ : blueprint_(closure, isolate, zone), closure_(closure) {
CHECK(closure->has_feedback_vector());
}
Hints::Hints(Zone* zone)
- : constants_(zone), maps_(zone), function_blueprints_(zone) {}
+ : virtual_contexts_(zone),
+ constants_(zone),
+ maps_(zone),
+ function_blueprints_(zone) {}
+
+#ifdef ENABLE_SLOW_DCHECKS
+namespace {
+template <typename K, typename Compare>
+bool SetIncludes(ZoneSet<K, Compare> const& lhs,
+ ZoneSet<K, Compare> const& rhs) {
+ return std::all_of(rhs.cbegin(), rhs.cend(),
+ [&](K const& x) { return lhs.find(x) != lhs.cend(); });
+}
+} // namespace
+bool Hints::Includes(Hints const& other) const {
+ return SetIncludes(constants(), other.constants()) &&
+ SetIncludes(function_blueprints(), other.function_blueprints()) &&
+ SetIncludes(maps(), other.maps());
+}
+bool Hints::Equals(Hints const& other) const {
+ return this->Includes(other) && other.Includes(*this);
+}
+#endif
const ConstantsSet& Hints::constants() const { return constants_; }
@@ -39,6 +504,14 @@ const BlueprintsSet& Hints::function_blueprints() const {
return function_blueprints_;
}
+const VirtualContextsSet& Hints::virtual_contexts() const {
+ return virtual_contexts_;
+}
+
+void Hints::AddVirtualContext(VirtualContext virtual_context) {
+ virtual_contexts_.insert(virtual_context);
+}
+
void Hints::AddConstant(Handle<Object> constant) {
constants_.insert(constant);
}
@@ -53,16 +526,29 @@ void Hints::Add(const Hints& other) {
for (auto x : other.constants()) AddConstant(x);
for (auto x : other.maps()) AddMap(x);
for (auto x : other.function_blueprints()) AddFunctionBlueprint(x);
+ for (auto x : other.virtual_contexts()) AddVirtualContext(x);
}
bool Hints::IsEmpty() const {
- return constants().empty() && maps().empty() && function_blueprints().empty();
+ return constants().empty() && maps().empty() &&
+ function_blueprints().empty() && virtual_contexts().empty();
}
std::ostream& operator<<(std::ostream& out,
+ const VirtualContext& virtual_context) {
+ out << "Distance " << virtual_context.distance << " from "
+ << Brief(*virtual_context.context) << std::endl;
+ return out;
+}
+
+std::ostream& operator<<(std::ostream& out, const Hints& hints);
+
+std::ostream& operator<<(std::ostream& out,
const FunctionBlueprint& blueprint) {
- out << Brief(*blueprint.shared) << std::endl;
- out << Brief(*blueprint.feedback_vector) << std::endl;
+ out << Brief(*blueprint.shared()) << std::endl;
+ out << Brief(*blueprint.feedback_vector()) << std::endl;
+ !blueprint.context_hints().IsEmpty() && out << blueprint.context_hints()
+ << "):" << std::endl;
return out;
}
@@ -76,10 +562,14 @@ std::ostream& operator<<(std::ostream& out, const Hints& hints) {
for (FunctionBlueprint const& blueprint : hints.function_blueprints()) {
out << " blueprint " << blueprint << std::endl;
}
+ for (VirtualContext const& virtual_context : hints.virtual_contexts()) {
+ out << " virtual context " << virtual_context << std::endl;
+ }
return out;
}
void Hints::Clear() {
+ virtual_contexts_.clear();
constants_.clear();
maps_.clear();
function_blueprints_.clear();
@@ -92,50 +582,53 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
Environment(Zone* zone, Isolate* isolate, CompilationSubject function,
base::Optional<Hints> new_target, const HintsVector& arguments);
- bool IsDead() const { return environment_hints_.empty(); }
+ bool IsDead() const { return ephemeral_hints_.empty(); }
void Kill() {
DCHECK(!IsDead());
- environment_hints_.clear();
+ ephemeral_hints_.clear();
DCHECK(IsDead());
}
void Revive() {
DCHECK(IsDead());
- environment_hints_.resize(environment_hints_size(), Hints(zone()));
+ ephemeral_hints_.resize(ephemeral_hints_size(), Hints(zone()));
DCHECK(!IsDead());
}
- // When control flow bytecodes are encountered, e.g. a conditional jump,
- // the current environment needs to be stashed together with the target jump
- // address. Later, when this target bytecode is handled, the stashed
- // environment will be merged into the current one.
+ // Merge {other} into {this} environment (leaving {other} unmodified).
void Merge(Environment* other);
FunctionBlueprint function() const { return function_; }
+ Hints const& closure_hints() const { return closure_hints_; }
+ Hints const& current_context_hints() const { return current_context_hints_; }
+ Hints& current_context_hints() { return current_context_hints_; }
+ Hints const& return_value_hints() const { return return_value_hints_; }
+ Hints& return_value_hints() { return return_value_hints_; }
+
Hints& accumulator_hints() {
- CHECK_LT(accumulator_index(), environment_hints_.size());
- return environment_hints_[accumulator_index()];
+ CHECK_LT(accumulator_index(), ephemeral_hints_.size());
+ return ephemeral_hints_[accumulator_index()];
}
+
Hints& register_hints(interpreter::Register reg) {
+ if (reg.is_function_closure()) return closure_hints_;
+ if (reg.is_current_context()) return current_context_hints_;
int local_index = RegisterToLocalIndex(reg);
- CHECK_LT(local_index, environment_hints_.size());
- return environment_hints_[local_index];
+ CHECK_LT(local_index, ephemeral_hints_.size());
+ return ephemeral_hints_[local_index];
}
- Hints& return_value_hints() { return return_value_hints_; }
- // Clears all hints except those for the return value and the closure.
+ // Clears all hints except those for the context, return value, and the
+ // closure.
void ClearEphemeralHints() {
- DCHECK_EQ(environment_hints_.size(), function_closure_index() + 1);
- for (int i = 0; i < function_closure_index(); ++i) {
- environment_hints_[i].Clear();
- }
+ for (auto& hints : ephemeral_hints_) hints.Clear();
}
// Appends the hints for the given register range to {dst} (in order).
void ExportRegisterHints(interpreter::Register first, size_t count,
- HintsVector& dst);
+ HintsVector& dst); // NOLINT(runtime/references)
private:
friend std::ostream& operator<<(std::ostream& out, const Environment& env);
@@ -153,34 +646,39 @@ class SerializerForBackgroundCompilation::Environment : public ZoneObject {
int const parameter_count_;
int const register_count_;
- // environment_hints_ contains hints for the contents of the registers,
+ Hints closure_hints_;
+ Hints current_context_hints_;
+ Hints return_value_hints_;
+
+ // ephemeral_hints_ contains hints for the contents of the registers,
// the accumulator and the parameters. The layout is as follows:
- // [ parameters | registers | accumulator | context | closure ]
+ // [ parameters | registers | accumulator ]
// The first parameter is the receiver.
- HintsVector environment_hints_;
+ HintsVector ephemeral_hints_;
int accumulator_index() const { return parameter_count() + register_count(); }
- int current_context_index() const { return accumulator_index() + 1; }
- int function_closure_index() const { return current_context_index() + 1; }
- int environment_hints_size() const { return function_closure_index() + 1; }
-
- Hints return_value_hints_;
+ int ephemeral_hints_size() const { return accumulator_index() + 1; }
};
SerializerForBackgroundCompilation::Environment::Environment(
Zone* zone, CompilationSubject function)
: zone_(zone),
function_(function.blueprint()),
- parameter_count_(function_.shared->GetBytecodeArray().parameter_count()),
- register_count_(function_.shared->GetBytecodeArray().register_count()),
- environment_hints_(environment_hints_size(), Hints(zone), zone),
- return_value_hints_(zone) {
+ parameter_count_(
+ function_.shared()->GetBytecodeArray().parameter_count()),
+ register_count_(function_.shared()->GetBytecodeArray().register_count()),
+ closure_hints_(zone),
+ current_context_hints_(zone),
+ return_value_hints_(zone),
+ ephemeral_hints_(ephemeral_hints_size(), Hints(zone), zone) {
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
- environment_hints_[function_closure_index()].AddConstant(closure);
+ closure_hints_.AddConstant(closure);
} else {
- environment_hints_[function_closure_index()].AddFunctionBlueprint(
- function.blueprint());
+ closure_hints_.AddFunctionBlueprint(function.blueprint());
}
+
+ // Consume blueprint context hint information.
+ current_context_hints().Add(function.blueprint().context_hints());
}
SerializerForBackgroundCompilation::Environment::Environment(
@@ -191,18 +689,19 @@ SerializerForBackgroundCompilation::Environment::Environment(
// the parameter_count.
size_t param_count = static_cast<size_t>(parameter_count());
for (size_t i = 0; i < std::min(arguments.size(), param_count); ++i) {
- environment_hints_[i] = arguments[i];
+ ephemeral_hints_[i] = arguments[i];
}
// Pad the rest with "undefined".
Hints undefined_hint(zone);
undefined_hint.AddConstant(isolate->factory()->undefined_value());
for (size_t i = arguments.size(); i < param_count; ++i) {
- environment_hints_[i] = undefined_hint;
+ ephemeral_hints_[i] = undefined_hint;
}
interpreter::Register new_target_reg =
- function_.shared->GetBytecodeArray()
+ function_.shared()
+ ->GetBytecodeArray()
.incoming_new_target_or_generator_register();
if (new_target_reg.is_valid()) {
DCHECK(register_hints(new_target_reg).IsEmpty());
@@ -219,16 +718,20 @@ void SerializerForBackgroundCompilation::Environment::Merge(
CHECK_EQ(parameter_count(), other->parameter_count());
CHECK_EQ(register_count(), other->register_count());
+ SLOW_DCHECK(closure_hints_.Equals(other->closure_hints_));
+
if (IsDead()) {
- environment_hints_ = other->environment_hints_;
+ ephemeral_hints_ = other->ephemeral_hints_;
+ SLOW_DCHECK(return_value_hints_.Includes(other->return_value_hints_));
CHECK(!IsDead());
return;
}
- CHECK_EQ(environment_hints_.size(), other->environment_hints_.size());
- for (size_t i = 0; i < environment_hints_.size(); ++i) {
- environment_hints_[i].Add(other->environment_hints_[i]);
+ CHECK_EQ(ephemeral_hints_.size(), other->ephemeral_hints_.size());
+ for (size_t i = 0; i < ephemeral_hints_.size(); ++i) {
+ ephemeral_hints_[i].Add(other->ephemeral_hints_[i]);
}
+
return_value_hints_.Add(other->return_value_hints_);
}
@@ -236,42 +739,39 @@ std::ostream& operator<<(
std::ostream& out,
const SerializerForBackgroundCompilation::Environment& env) {
std::ostringstream output_stream;
+ output_stream << "Function ";
+ env.function_.shared()->Name().Print(output_stream);
- for (size_t i = 0; i << env.parameter_count(); ++i) {
- Hints const& hints = env.environment_hints_[i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for a" << i << ":\n" << hints;
- }
- }
- for (size_t i = 0; i << env.register_count(); ++i) {
- Hints const& hints = env.environment_hints_[env.parameter_count() + i];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for r" << i << ":\n" << hints;
- }
- }
- {
- Hints const& hints = env.environment_hints_[env.accumulator_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <accumulator>:\n" << hints;
+ if (env.IsDead()) {
+ output_stream << "dead\n";
+ } else {
+ output_stream << "alive\n";
+ for (int i = 0; i < static_cast<int>(env.ephemeral_hints_.size()); ++i) {
+ Hints const& hints = env.ephemeral_hints_[i];
+ if (!hints.IsEmpty()) {
+ if (i < env.parameter_count()) {
+ output_stream << "Hints for a" << i << ":\n";
+ } else if (i < env.parameter_count() + env.register_count()) {
+ int local_register = i - env.parameter_count();
+ output_stream << "Hints for r" << local_register << ":\n";
+ } else if (i == env.accumulator_index()) {
+ output_stream << "Hints for <accumulator>:\n";
+ } else {
+ UNREACHABLE();
+ }
+ output_stream << hints;
+ }
}
}
- {
- Hints const& hints = env.environment_hints_[env.function_closure_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <closure>:\n" << hints;
- }
+
+ if (!env.closure_hints().IsEmpty()) {
+ output_stream << "Hints for <closure>:\n" << env.closure_hints();
}
- {
- Hints const& hints = env.environment_hints_[env.current_context_index()];
- if (!hints.IsEmpty()) {
- output_stream << "Hints for <context>:\n" << hints;
- }
+ if (!env.current_context_hints().IsEmpty()) {
+ output_stream << "Hints for <context>:\n" << env.current_context_hints();
}
- {
- Hints const& hints = env.return_value_hints_;
- if (!hints.IsEmpty()) {
- output_stream << "Hints for {return value}:\n" << hints;
- }
+ if (!env.return_value_hints().IsEmpty()) {
+ output_stream << "Hints for {return value}:\n" << env.return_value_hints();
}
out << output_stream.str();
@@ -280,25 +780,26 @@ std::ostream& operator<<(
int SerializerForBackgroundCompilation::Environment::RegisterToLocalIndex(
interpreter::Register reg) const {
- // TODO(mslekova): We also want to gather hints for the context.
- if (reg.is_current_context()) return current_context_index();
- if (reg.is_function_closure()) return function_closure_index();
if (reg.is_parameter()) {
return reg.ToParameterIndex(parameter_count());
} else {
+ DCHECK(!reg.is_function_closure());
return parameter_count() + reg.index();
}
}
SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags)
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset)
: broker_(broker),
dependencies_(dependencies),
zone_(zone),
- environment_(new (zone) Environment(zone, {closure, broker_->isolate()})),
- stashed_environments_(zone),
- flags_(flags) {
+ environment_(new (zone) Environment(
+ zone, CompilationSubject(closure, broker_->isolate(), zone))),
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(osr_offset) {
JSFunctionRef(broker, closure).Serialize();
}
@@ -311,9 +812,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
zone_(zone),
environment_(new (zone) Environment(zone, broker_->isolate(), function,
new_target, arguments)),
- stashed_environments_(zone),
- flags_(flags) {
- DCHECK(!(flags_ & SerializerForBackgroundCompilationFlag::kOsr));
+ jump_target_environments_(zone),
+ flags_(flags),
+ osr_offset_(BailoutId::None()) {
TraceScope tracer(
broker_, this,
"SerializerForBackgroundCompilation::SerializerForBackgroundCompilation");
@@ -331,12 +832,12 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
SerializerForBackgroundCompilationFlag::kBailoutOnUninitialized)) {
return false;
}
- if (flags() & SerializerForBackgroundCompilationFlag::kOsr) {
+ if (!osr_offset().IsNone()) {
// Exclude OSR from this optimization because we might end up skipping the
// OSR entry point. TODO(neis): Support OSR?
return false;
}
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
if (!slot.IsInvalid() && nexus.IsUninitialized()) {
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) {
@@ -354,9 +855,9 @@ bool SerializerForBackgroundCompilation::BailoutOnUninitialized(
Hints SerializerForBackgroundCompilation::Run() {
TraceScope tracer(broker(), this, "SerializerForBackgroundCompilation::Run");
- SharedFunctionInfoRef shared(broker(), environment()->function().shared);
- FeedbackVectorRef feedback_vector(broker(),
- environment()->function().feedback_vector);
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
if (shared.IsSerializedForCompilation(feedback_vector)) {
TRACE_BROKER(broker(), "Already ran serializer for SharedFunctionInfo "
<< Brief(*shared.object())
@@ -382,9 +883,10 @@ Hints SerializerForBackgroundCompilation::Run() {
class ExceptionHandlerMatcher {
public:
explicit ExceptionHandlerMatcher(
- BytecodeArrayIterator const& bytecode_iterator)
+ BytecodeArrayIterator const& bytecode_iterator,
+ Handle<BytecodeArray> bytecode_array)
: bytecode_iterator_(bytecode_iterator) {
- HandlerTable table(*bytecode_iterator_.bytecode_array());
+ HandlerTable table(*bytecode_array);
for (int i = 0, n = table.NumberOfRangeEntries(); i < n; ++i) {
handlers_.insert(table.GetRangeHandler(i));
}
@@ -407,30 +909,53 @@ class ExceptionHandlerMatcher {
std::set<int>::const_iterator handlers_iterator_;
};
+Handle<BytecodeArray> SerializerForBackgroundCompilation::bytecode_array()
+ const {
+ return handle(environment()->function().shared()->GetBytecodeArray(),
+ broker()->isolate());
+}
+
+BytecodeAnalysis const& SerializerForBackgroundCompilation::GetBytecodeAnalysis(
+ bool serialize) {
+ return broker()->GetBytecodeAnalysis(
+ bytecode_array(), osr_offset(),
+ flags() &
+ SerializerForBackgroundCompilationFlag::kAnalyzeEnvironmentLiveness,
+ serialize);
+}
+
void SerializerForBackgroundCompilation::TraverseBytecode() {
- BytecodeArrayRef bytecode_array(
- broker(), handle(environment()->function().shared->GetBytecodeArray(),
- broker()->isolate()));
- BytecodeArrayIterator iterator(bytecode_array.object());
- ExceptionHandlerMatcher handler_matcher(iterator);
+ BytecodeAnalysis const& bytecode_analysis = GetBytecodeAnalysis(true);
+ BytecodeArrayRef(broker(), bytecode_array()).SerializeForCompilation();
+
+ BytecodeArrayIterator iterator(bytecode_array());
+ ExceptionHandlerMatcher handler_matcher(iterator, bytecode_array());
for (; !iterator.done(); iterator.Advance()) {
- MergeAfterJump(&iterator);
+ int const current_offset = iterator.current_offset();
+ IncorporateJumpTargetEnvironment(current_offset);
+
+ TRACE_BROKER(broker(),
+ "Handling bytecode: " << current_offset << " "
+ << iterator.current_bytecode());
+ TRACE_BROKER(broker(), "Current environment: " << *environment());
if (environment()->IsDead()) {
- if (iterator.current_bytecode() ==
- interpreter::Bytecode::kResumeGenerator ||
- handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
+ if (handler_matcher.CurrentBytecodeIsExceptionHandlerStart()) {
environment()->Revive();
} else {
continue; // Skip this bytecode since TF won't generate code for it.
}
}
- TRACE_BROKER(broker(),
- "Handling bytecode: " << iterator.current_offset() << " "
- << iterator.current_bytecode());
- TRACE_BROKER(broker(), "Current environment:\n" << *environment());
+ if (bytecode_analysis.IsLoopHeader(current_offset)) {
+ // Graph builder might insert jumps to resume targets in the loop body.
+ LoopInfo const& loop_info =
+ bytecode_analysis.GetLoopInfoFor(current_offset);
+ for (const auto& target : loop_info.resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+ }
switch (iterator.current_bytecode()) {
#define DEFINE_BYTECODE_CASE(name) \
@@ -447,21 +972,6 @@ void SerializerForBackgroundCompilation::TraverseBytecode() {
}
}
-void SerializerForBackgroundCompilation::VisitIllegal(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
-void SerializerForBackgroundCompilation::VisitExtraWide(
- BytecodeArrayIterator* iterator) {
- UNREACHABLE();
-}
-
void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
BytecodeArrayIterator* iterator) {
interpreter::Register dst = iterator->GetRegisterOperand(0);
@@ -480,6 +990,20 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
}
}
+void SerializerForBackgroundCompilation::VisitGetTemplateObject(
+ BytecodeArrayIterator* iterator) {
+ ObjectRef description(
+ broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ FeedbackSlot slot = iterator->GetSlotOperand(1);
+ FeedbackVectorRef feedback_vector(
+ broker(), environment()->function().feedback_vector());
+ SharedFunctionInfoRef shared(broker(), environment()->function().shared());
+ JSArrayRef template_object =
+ shared.GetTemplateObject(description, feedback_vector, slot, true);
+ environment()->accumulator_hints().Clear();
+ environment()->accumulator_hints().AddConstant(template_object.object());
+}
+
void SerializerForBackgroundCompilation::VisitLdaTrue(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
@@ -529,11 +1053,171 @@ void SerializerForBackgroundCompilation::VisitLdaSmi(
Smi::FromInt(iterator->GetImmediateOperand(0)), broker()->isolate()));
}
+void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
+ BytecodeArrayIterator* iterator) {
+ Runtime::FunctionId functionId = iterator->GetIntrinsicIdOperand(0);
+ // For JSNativeContextSpecialization::ReduceJSAsyncFunctionResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (functionId == Runtime::kInlineAsyncFunctionResolve) {
+ interpreter::Register first_reg = iterator->GetRegisterOperand(1);
+ size_t reg_count = iterator->GetRegisterCountOperand(2);
+ CHECK_EQ(reg_count, 3);
+ HintsVector arguments(zone());
+ environment()->ExportRegisterHints(first_reg, reg_count, arguments);
+ Hints const& resolution_hints = arguments[1]; // The resolution object.
+ ProcessHintsForPromiseResolve(resolution_hints);
+ environment()->accumulator_hints().Clear();
+ return;
+ }
+ environment()->ClearEphemeralHints();
+}
+
void SerializerForBackgroundCompilation::VisitLdaConstant(
BytecodeArrayIterator* iterator) {
environment()->accumulator_hints().Clear();
environment()->accumulator_hints().AddConstant(
- handle(iterator->GetConstantForIndexOperand(0), broker()->isolate()));
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+}
+
+void SerializerForBackgroundCompilation::VisitPushContext(
+ BytecodeArrayIterator* iterator) {
+ // Transfer current context hints to the destination register hints.
+ Hints& current_context_hints = environment()->current_context_hints();
+ Hints& saved_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ saved_context_hints.Clear();
+ saved_context_hints.Add(current_context_hints);
+
+ // New Context is in the accumulator. Put those hints into the current context
+ // register hints.
+ current_context_hints.Clear();
+ current_context_hints.Add(environment()->accumulator_hints());
+}
+
+void SerializerForBackgroundCompilation::VisitPopContext(
+ BytecodeArrayIterator* iterator) {
+ // Replace current context hints with hints given in the argument register.
+ Hints& new_context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->current_context_hints().Clear();
+ environment()->current_context_hints().Add(new_context_hints);
+}
+
+void SerializerForBackgroundCompilation::ProcessImmutableLoad(
+ ContextRef& context_ref, int slot, ContextProcessingMode mode) {
+ DCHECK(mode == kSerializeSlot || mode == kSerializeSlotAndAddToAccumulator);
+ base::Optional<ObjectRef> slot_value = context_ref.get(slot, true);
+
+ // Also, put the object into the constant hints for the accumulator.
+ if (mode == kSerializeSlotAndAddToAccumulator && slot_value.has_value()) {
+ environment()->accumulator_hints().AddConstant(slot_value.value().object());
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessContextAccess(
+ const Hints& context_hints, int slot, int depth,
+ ContextProcessingMode mode) {
+ // This function is for JSContextSpecialization::ReduceJSLoadContext and
+ // ReduceJSStoreContext. Those reductions attempt to eliminate as many
+ // loads as possible by making use of constant Context objects. In the
+ // case of an immutable load, ReduceJSLoadContext even attempts to load
+ // the value at {slot}, replacing the load with a constant.
+ for (auto x : context_hints.constants()) {
+ if (x->IsContext()) {
+ // Walk this context to the given depth and serialize the slot found.
+ ContextRef context_ref(broker(), x);
+ size_t remaining_depth = depth;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+ for (auto x : context_hints.virtual_contexts()) {
+ if (x.distance <= static_cast<unsigned int>(depth)) {
+ ContextRef context_ref(broker(), x.context);
+ size_t remaining_depth = depth - x.distance;
+ context_ref = context_ref.previous(&remaining_depth, true);
+ if (remaining_depth == 0 && mode != kIgnoreSlot) {
+ ProcessImmutableLoad(context_ref, slot, mode);
+ }
+ }
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitLdaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& context_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaImmutableCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot, depth,
+ kSerializeSlotAndAddToAccumulator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+
+ // TODO(mvstanton): If we have a constant module, should we serialize the
+ // cell as well? Then we could put the value in the accumulator.
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaModuleVariable(
+ BytecodeArrayIterator* iterator) {
+ const int depth = iterator->GetUnsignedImmediateOperand(1);
+ ProcessContextAccess(environment()->current_context_hints(),
+ Context::EXTENSION_INDEX, depth, kSerializeSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ Hints& register_hints =
+ environment()->register_hints(iterator->GetRegisterOperand(0));
+ ProcessContextAccess(register_hints, slot, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitStaCurrentContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot = iterator->GetIndexOperand(0);
+ const int depth = 0;
+ Hints& context_hints = environment()->current_context_hints();
+ ProcessContextAccess(context_hints, slot, depth, kIgnoreSlot);
}
void SerializerForBackgroundCompilation::VisitLdar(
@@ -558,14 +1242,60 @@ void SerializerForBackgroundCompilation::VisitMov(
environment()->register_hints(dst).Add(environment()->register_hints(src));
}
+void SerializerForBackgroundCompilation::VisitCreateFunctionContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateBlockContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateEvalContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateWithContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::VisitCreateCatchContext(
+ BytecodeArrayIterator* iterator) {
+ ProcessCreateContext();
+}
+
+void SerializerForBackgroundCompilation::ProcessCreateContext() {
+ Hints& accumulator_hints = environment()->accumulator_hints();
+ accumulator_hints.Clear();
+ Hints& current_context_hints = environment()->current_context_hints();
+
+ // For each constant context, we must create a virtual context from
+ // it of distance one.
+ for (auto x : current_context_hints.constants()) {
+ if (x->IsContext()) {
+ Handle<Context> as_context(Handle<Context>::cast(x));
+ accumulator_hints.AddVirtualContext(VirtualContext(1, as_context));
+ }
+ }
+
+ // For each virtual context, we must create a virtual context from
+ // it of distance {existing distance} + 1.
+ for (auto x : current_context_hints.virtual_contexts()) {
+ accumulator_hints.AddVirtualContext(
+ VirtualContext(x.distance + 1, x.context));
+ }
+}
+
void SerializerForBackgroundCompilation::VisitCreateClosure(
BytecodeArrayIterator* iterator) {
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(iterator->GetConstantForIndexOperand(0)),
- broker()->isolate());
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
Handle<FeedbackCell> feedback_cell =
- environment()->function().feedback_vector->GetClosureFeedbackCell(
+ environment()->function().feedback_vector()->GetClosureFeedbackCell(
iterator->GetIndexOperand(1));
FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
@@ -573,8 +1303,13 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
environment()->accumulator_hints().Clear();
if (cell_value->IsFeedbackVector()) {
- environment()->accumulator_hints().AddFunctionBlueprint(
- {shared, Handle<FeedbackVector>::cast(cell_value)});
+ // Gather the context hints from the current context register hint
+ // structure.
+ FunctionBlueprint blueprint(shared,
+ Handle<FeedbackVector>::cast(cell_value),
+ environment()->current_context_hints());
+
+ environment()->accumulator_hints().AddFunctionBlueprint(blueprint);
}
}
@@ -685,6 +1420,16 @@ void SerializerForBackgroundCompilation::VisitCallWithSpread(
ProcessCallVarArgs(iterator, ConvertReceiverMode::kAny, true);
}
+void SerializerForBackgroundCompilation::VisitCallJSRuntime(
+ BytecodeArrayIterator* iterator) {
+ environment()->accumulator_hints().Clear();
+
+ // BytecodeGraphBuilder::VisitCallJSRuntime needs the {runtime_index}
+ // slot in the native context to be serialized.
+ const int runtime_index = iterator->GetNativeContextIndexOperand(0);
+ broker()->native_context().get(runtime_index, true);
+}
+
Hints SerializerForBackgroundCompilation::RunChildSerializer(
CompilationSubject function, base::Optional<Hints> new_target,
const HintsVector& arguments, bool with_spread) {
@@ -700,14 +1445,14 @@ Hints SerializerForBackgroundCompilation::RunChildSerializer(
padded.pop_back(); // Remove the spread element.
// Fill the rest with empty hints.
padded.resize(
- function.blueprint().shared->GetBytecodeArray().parameter_count(),
+ function.blueprint().shared()->GetBytecodeArray().parameter_count(),
Hints(zone()));
return RunChildSerializer(function, new_target, padded, false);
}
SerializerForBackgroundCompilation child_serializer(
broker(), dependencies(), zone(), function, new_target, arguments,
- flags().without(SerializerForBackgroundCompilationFlag::kOsr));
+ flags());
return child_serializer.Run();
}
@@ -734,7 +1479,7 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
// Incorporate feedback into hints.
base::Optional<HeapObjectRef> feedback = GetHeapObjectFeedback(
- broker(), environment()->function().feedback_vector, slot);
+ broker(), environment()->function().feedback_vector(), slot);
if (feedback.has_value() && feedback->map().is_callable()) {
if (new_target.has_value()) {
// Construct; feedback is new_target, which often is also the callee.
@@ -752,15 +1497,37 @@ void SerializerForBackgroundCompilation::ProcessCallOrConstruct(
if (!hint->IsJSFunction()) continue;
Handle<JSFunction> function = Handle<JSFunction>::cast(hint);
- if (!function->shared().IsInlineable() || !function->has_feedback_vector())
- continue;
+ JSFunctionRef(broker(), function).Serialize();
+
+ Handle<SharedFunctionInfo> shared(function->shared(), broker()->isolate());
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable() || !function->has_feedback_vector()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
- {function, broker()->isolate()}, new_target, arguments, with_spread));
+ CompilationSubject(function, broker()->isolate(), zone()), new_target,
+ arguments, with_spread));
}
for (auto hint : callee.function_blueprints()) {
- if (!hint.shared->IsInlineable()) continue;
+ Handle<SharedFunctionInfo> shared = hint.shared();
+
+ if (shared->IsApiFunction()) {
+ ProcessApiCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ } else if (shared->HasBuiltinId()) {
+ ProcessBuiltinCall(shared, arguments);
+ DCHECK(!shared->IsInlineable());
+ }
+
+ if (!shared->IsInlineable()) continue;
environment()->accumulator_hints().Add(RunChildSerializer(
CompilationSubject(hint), new_target, arguments, with_spread));
}
@@ -788,22 +1555,222 @@ void SerializerForBackgroundCompilation::ProcessCallVarArgs(
ProcessCallOrConstruct(callee, base::nullopt, arguments, slot);
}
-void SerializerForBackgroundCompilation::ProcessJump(
- interpreter::BytecodeArrayIterator* iterator) {
- int jump_target = iterator->GetJumpTargetOffset();
- int current_offset = iterator->current_offset();
- if (current_offset >= jump_target) return;
+void SerializerForBackgroundCompilation::ProcessApiCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ FunctionTemplateInfoRef target_template_info(
+ broker(), handle(target->function_data(), broker()->isolate()));
+ if (!target_template_info.has_call_code()) return;
+
+ target_template_info.SerializeCallCode();
+
+ SharedFunctionInfoRef target_ref(broker(), target);
+ target_ref.SerializeFunctionTemplateInfo();
+
+ if (target_template_info.accept_any_receiver() &&
+ target_template_info.is_signature_undefined())
+ return;
- stashed_environments_[jump_target] = new (zone()) Environment(*environment());
+ CHECK_GE(arguments.size(), 1);
+ Hints const& receiver_hints = arguments[0];
+ for (auto hint : receiver_hints.constants()) {
+ if (hint->IsUndefined()) {
+ // The receiver is the global proxy.
+ Handle<JSGlobalProxy> global_proxy =
+ broker()->native_context().global_proxy_object().object();
+ ProcessReceiverMapForApiCall(
+ target_template_info,
+ handle(global_proxy->map(), broker()->isolate()));
+ continue;
+ }
+
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+
+ ProcessReceiverMapForApiCall(target_template_info,
+ handle(receiver->map(), broker()->isolate()));
+ }
+
+ for (auto receiver_map : receiver_hints.maps()) {
+ ProcessReceiverMapForApiCall(target_template_info, receiver_map);
+ }
}
-void SerializerForBackgroundCompilation::MergeAfterJump(
+void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
+ FunctionTemplateInfoRef& target, Handle<Map> receiver) {
+ if (receiver->is_access_check_needed()) {
+ return;
+ }
+
+ MapRef receiver_map(broker(), receiver);
+ TRACE_BROKER(broker(), "Serializing holder for target:" << target);
+
+ target.LookupHolderOfExpectedType(receiver_map, true);
+}
+
+void SerializerForBackgroundCompilation::ProcessBuiltinCall(
+ Handle<SharedFunctionInfo> target, const HintsVector& arguments) {
+ DCHECK(target->HasBuiltinId());
+ const int builtin_id = target->builtin_id();
+ const char* name = Builtins::name(builtin_id);
+ TRACE_BROKER(broker(), "Serializing for call to builtin " << name);
+ switch (builtin_id) {
+ case Builtins::kPromisePrototypeCatch: {
+ // For JSCallReducer::ReducePromisePrototypeCatch.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeFinally: {
+ // For JSCallReducer::ReducePromisePrototypeFinally.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromisePrototypeThen: {
+ // For JSCallReducer::ReducePromisePrototypeThen.
+ CHECK_GE(arguments.size(), 1);
+ ProcessMapHintsForPromises(arguments[0]);
+ break;
+ }
+ case Builtins::kPromiseResolveTrampoline:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 2) {
+ Hints const& resolution_hints = arguments[1];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kPromiseInternalResolve:
+ // For JSCallReducer::ReducePromiseInternalResolve and
+ // JSNativeContextSpecialization::ReduceJSResolvePromise.
+ if (arguments.size() >= 3) {
+ Hints const& resolution_hints = arguments[2];
+ ProcessHintsForPromiseResolve(resolution_hints);
+ }
+ break;
+ case Builtins::kRegExpPrototypeTest: {
+ // For JSCallReducer::ReduceRegExpPrototypeTest.
+ if (arguments.size() >= 1) {
+ Hints const& regexp_hints = arguments[0];
+ ProcessHintsForRegExpTest(regexp_hints);
+ }
+ break;
+ }
+ case Builtins::kFunctionPrototypeCall:
+ if (arguments.size() >= 1) {
+ Hints const& target_hints = arguments[0];
+ ProcessHintsForFunctionCall(target_hints);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
+ Hints const& resolution_hints) {
+ auto processMap = [&](Handle<Map> map) {
+ broker()->CreateAccessInfoForLoadingThen(MapRef(broker(), map),
+ dependencies());
+ };
+
+ for (auto hint : resolution_hints.constants()) {
+ if (!hint->IsJSReceiver()) continue;
+ Handle<JSReceiver> receiver(Handle<JSReceiver>::cast(hint));
+ processMap(handle(receiver->map(), broker()->isolate()));
+ }
+ for (auto map_hint : resolution_hints.maps()) {
+ processMap(map_hint);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
+ Hints const& receiver_hints) {
+ // We need to serialize the prototypes on each receiver map.
+ for (auto constant : receiver_hints.constants()) {
+ if (!constant->IsJSPromise()) continue;
+ Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
+ broker()->isolate());
+ MapRef(broker(), map).SerializePrototype();
+ }
+ for (auto map : receiver_hints.maps()) {
+ if (!map->IsJSPromiseMap()) continue;
+ MapRef(broker(), map).SerializePrototype();
+ }
+}
+
+PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
+ MapRef map) {
+ PropertyAccessInfo ai_exec =
+ broker()->CreateAccessInfoForLoadingExec(map, dependencies());
+
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the prototype chain.
+ JSObjectRef holder_ref(broker(), holder);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ return ai_exec;
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
+ Hints const& regexp_hints) {
+ for (auto hint : regexp_hints.constants()) {
+ if (!hint->IsJSRegExp()) continue;
+ Handle<JSRegExp> regexp(Handle<JSRegExp>::cast(hint));
+ Handle<Map> regexp_map(regexp->map(), broker()->isolate());
+ PropertyAccessInfo ai_exec =
+ ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
+ Handle<JSObject> holder;
+ if (ai_exec.IsDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
+ // The property is on the object itself.
+ JSObjectRef holder_ref(broker(), regexp);
+ holder_ref.GetOwnProperty(ai_exec.field_representation(),
+ ai_exec.field_index(), true);
+ }
+ }
+
+ for (auto map : regexp_hints.maps()) {
+ if (!map->IsJSRegExpMap()) continue;
+ ProcessMapForRegExpTest(MapRef(broker(), map));
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessHintsForFunctionCall(
+ Hints const& target_hints) {
+ for (auto constant : target_hints.constants()) {
+ if (!constant->IsJSFunction()) continue;
+ JSFunctionRef func(broker(), constant);
+ func.Serialize();
+ }
+}
+
+void SerializerForBackgroundCompilation::ContributeToJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it == jump_target_environments_.end()) {
+ jump_target_environments_[target_offset] =
+ new (zone()) Environment(*environment());
+ } else {
+ it->second->Merge(environment());
+ }
+}
+
+void SerializerForBackgroundCompilation::IncorporateJumpTargetEnvironment(
+ int target_offset) {
+ auto it = jump_target_environments_.find(target_offset);
+ if (it != jump_target_environments_.end()) {
+ environment()->Merge(it->second);
+ jump_target_environments_.erase(it);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessJump(
interpreter::BytecodeArrayIterator* iterator) {
- int current_offset = iterator->current_offset();
- auto stash = stashed_environments_.find(current_offset);
- if (stash != stashed_environments_.end()) {
- environment()->Merge(stash->second);
- stashed_environments_.erase(stash);
+ int jump_target = iterator->GetJumpTargetOffset();
+ if (iterator->current_offset() < jump_target) {
+ ContributeToJumpTargetEnvironment(jump_target);
}
}
@@ -813,10 +1780,25 @@ void SerializerForBackgroundCompilation::VisitReturn(
environment()->ClearEphemeralHints();
}
+void SerializerForBackgroundCompilation::VisitSwitchOnSmiNoFeedback(
+ interpreter::BytecodeArrayIterator* iterator) {
+ interpreter::JumpTableTargetOffsets targets =
+ iterator->GetJumpTableTargetOffsets();
+ for (const auto& target : targets) {
+ ContributeToJumpTargetEnvironment(target.target_offset);
+ }
+}
+
+void SerializerForBackgroundCompilation::VisitSwitchOnGeneratorState(
+ interpreter::BytecodeArrayIterator* iterator) {
+ for (const auto& target : GetBytecodeAnalysis(false).resume_jump_targets()) {
+ ContributeToJumpTargetEnvironment(target.target_offset());
+ }
+}
+
void SerializerForBackgroundCompilation::Environment::ExportRegisterHints(
interpreter::Register first, size_t count, HintsVector& dst) {
- dst.resize(dst.size() + count, Hints(zone()));
- int reg_base = first.index();
+ const int reg_base = first.index();
for (int i = 0; i < static_cast<int>(count); ++i) {
dst.push_back(register_hints(interpreter::Register(reg_base + i)));
}
@@ -856,8 +1838,8 @@ GlobalAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackForGlobalAccess(
FeedbackSlot slot) {
if (slot.IsInvalid()) return nullptr;
- if (environment()->function().feedback_vector.is_null()) return nullptr;
- FeedbackSource source(environment()->function().feedback_vector, slot);
+ if (environment()->function().feedback_vector().is_null()) return nullptr;
+ FeedbackSource source(environment()->function().feedback_vector(), slot);
if (broker()->HasFeedback(source)) {
return broker()->GetGlobalAccessFeedback(source);
@@ -889,14 +1871,31 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
VisitLdaGlobal(iterator);
}
-void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
+ int depth) {
+ // For BytecodeGraphBuilder::CheckContextExtensions.
+ Hints& context_hints = environment()->current_context_hints();
+ for (int i = 0; i < depth; i++) {
+ ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
+ kSerializeSlot);
+ }
+}
+
+void SerializerForBackgroundCompilation::ProcessLdaLookupGlobalSlot(
BytecodeArrayIterator* iterator) {
+ ProcessCheckContextExtensions(iterator->GetUnsignedImmediateOperand(2));
+ // TODO(neis): BytecodeGraphBuilder may insert a JSLoadGlobal.
VisitLdaGlobal(iterator);
}
+void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupGlobalSlot(iterator);
+}
+
void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
BytecodeArrayIterator* iterator) {
- VisitLdaGlobal(iterator);
+ ProcessLdaLookupGlobalSlot(iterator);
}
void SerializerForBackgroundCompilation::VisitStaGlobal(
@@ -905,6 +1904,26 @@ void SerializerForBackgroundCompilation::VisitStaGlobal(
ProcessFeedbackForGlobalAccess(slot);
}
+void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ const int slot_index = iterator->GetIndexOperand(1);
+ const int depth = iterator->GetUnsignedImmediateOperand(2);
+ ProcessCheckContextExtensions(depth);
+ Hints& context_hints = environment()->current_context_hints();
+ environment()->accumulator_hints().Clear();
+ ProcessContextAccess(context_hints, slot_index, depth, kIgnoreSlot);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlot(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
+void SerializerForBackgroundCompilation::VisitLdaLookupContextSlotInsideTypeof(
+ BytecodeArrayIterator* iterator) {
+ ProcessLdaLookupContextSlot(iterator);
+}
+
namespace {
template <class MapContainer>
MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
@@ -922,9 +1941,10 @@ MapHandles GetRelevantReceiverMaps(Isolate* isolate, MapContainer const& maps) {
ElementAccessFeedback const*
SerializerForBackgroundCompilation::ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode) {
+ const MapHandles& maps, AccessMode mode,
+ KeyedAccessMode const& keyed_mode) {
ElementAccessFeedback const* result =
- broker()->ProcessFeedbackMapsForElementAccess(maps);
+ broker()->ProcessFeedbackMapsForElementAccess(maps, keyed_mode);
for (ElementAccessFeedback::MapIterator it = result->all_maps(broker());
!it.done(); it.advance()) {
switch (mode) {
@@ -952,9 +1972,34 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
ProcessMapForNamedPropertyAccess(map_ref, name);
AccessInfoFactory access_info_factory(broker(), dependencies(),
broker()->zone());
- access_infos.push_back(access_info_factory.ComputePropertyAccessInfo(
+ PropertyAccessInfo info(access_info_factory.ComputePropertyAccessInfo(
map, name.object(), mode));
+ access_infos.push_back(info);
+
+ // TODO(turbofan): We want to take receiver hints into account as well,
+ // not only the feedback maps.
+ // For JSNativeContextSpecialization::InlinePropertySetterCall
+ // and InlinePropertyGetterCall.
+ if (info.IsAccessorConstant() && !info.constant().is_null()) {
+ if (info.constant()->IsJSFunction()) {
+ // For JSCallReducer::ReduceCallApiFunction.
+ Handle<SharedFunctionInfo> sfi(
+ handle(Handle<JSFunction>::cast(info.constant())->shared(),
+ broker()->isolate()));
+ if (sfi->IsApiFunction()) {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), handle(sfi->get_api_func_data(), broker()->isolate()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ ProcessReceiverMapForApiCall(fti_ref, map);
+ }
+ } else {
+ FunctionTemplateInfoRef fti_ref(
+ broker(), Handle<FunctionTemplateInfo>::cast(info.constant()));
+ if (fti_ref.has_call_code()) fti_ref.SerializeCallCode();
+ }
+ }
}
+
DCHECK(!access_infos.empty());
return new (broker()->zone()) NamedAccessFeedback(name, access_infos);
}
@@ -962,9 +2007,9 @@ SerializerForBackgroundCompilation::ProcessFeedbackMapsForNamedAccess(
void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
FeedbackSlot slot, AccessMode mode, base::Optional<NameRef> static_name) {
if (slot.IsInvalid()) return;
- if (environment()->function().feedback_vector.is_null()) return;
+ if (environment()->function().feedback_vector().is_null()) return;
- FeedbackNexus nexus(environment()->function().feedback_vector, slot);
+ FeedbackNexus nexus(environment()->function().feedback_vector(), slot);
FeedbackSource source(nexus);
if (broker()->HasFeedback(source)) return;
@@ -992,8 +2037,10 @@ void SerializerForBackgroundCompilation::ProcessFeedbackForPropertyAccess(
static_name.has_value() ? static_name : broker()->GetNameFeedback(nexus);
if (name.has_value()) {
processed = ProcessFeedbackMapsForNamedAccess(maps, mode, *name);
- } else if (nexus.GetKeyType() == ELEMENT && nexus.ic_state() != MEGAMORPHIC) {
- processed = ProcessFeedbackMapsForElementAccess(maps, mode);
+ } else if (nexus.GetKeyType() == ELEMENT) {
+ DCHECK_NE(nexus.ic_state(), MEGAMORPHIC);
+ processed = ProcessFeedbackMapsForElementAccess(
+ maps, mode, KeyedAccessMode::FromNexus(nexus));
}
broker()->SetFeedback(source, processed);
}
@@ -1087,8 +2134,8 @@ void SerializerForBackgroundCompilation::ProcessNamedPropertyAccess(
BytecodeArrayIterator* iterator, AccessMode mode) {
Hints const& receiver =
environment()->register_hints(iterator->GetRegisterOperand(0));
- Handle<Name> name(Name::cast(iterator->GetConstantForIndexOperand(1)),
- broker()->isolate());
+ Handle<Name> name = Handle<Name>::cast(
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), slot, mode);
}
@@ -1176,6 +2223,31 @@ UNCONDITIONAL_JUMPS_LIST(DEFINE_UNCONDITIONAL_JUMP)
IGNORED_BYTECODE_LIST(DEFINE_IGNORE)
#undef DEFINE_IGNORE
+#define DEFINE_UNREACHABLE(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ UNREACHABLE(); \
+ }
+UNREACHABLE_BYTECODE_LIST(DEFINE_UNREACHABLE)
+#undef DEFINE_UNREACHABLE
+
+#define DEFINE_KILL(name, ...) \
+ void SerializerForBackgroundCompilation::Visit##name( \
+ BytecodeArrayIterator* iterator) { \
+ environment()->Kill(); \
+ }
+KILL_ENVIRONMENT_LIST(DEFINE_KILL)
+#undef DEFINE_KILL
+
+#undef CLEAR_ENVIRONMENT_LIST
+#undef KILL_ENVIRONMENT_LIST
+#undef CLEAR_ACCUMULATOR_LIST
+#undef UNCONDITIONAL_JUMPS_LIST
+#undef CONDITIONAL_JUMPS_LIST
+#undef IGNORED_BYTECODE_LIST
+#undef UNREACHABLE_BYTECODE_LIST
+#undef SUPPORTED_BYTECODE_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
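
Aside (not part of the patch): a minimal, hedged sketch of the forward-jump bookkeeping pattern introduced above. Environments contributed at a jump site are merged into a per-target entry, and that entry is consumed once the bytecode walk reaches the target offset. The Env type, its Merge(), and the JumpTargets class below are hypothetical stand-ins for the serializer's Environment and its jump_target_environments_ map.

#include <map>
#include <set>

struct Env {
  std::set<int> hints;  // Stand-in for register/accumulator hints.
  void Merge(const Env& other) {
    hints.insert(other.hints.begin(), other.hints.end());
  }
};

class JumpTargets {
 public:
  // Visiting a forward jump: contribute the current environment to the target.
  void Contribute(int target_offset, const Env& current) {
    auto it = targets_.find(target_offset);
    if (it == targets_.end()) {
      targets_[target_offset] = current;  // First contribution: copy.
    } else {
      it->second.Merge(current);          // Later contributions: merge.
    }
  }
  // Reaching an offset: fold any stored environment back in, then drop it.
  void Incorporate(int offset, Env* current) {
    auto it = targets_.find(offset);
    if (it != targets_.end()) {
      current->Merge(it->second);
      targets_.erase(it);  // Each stored entry is consumed exactly once.
    }
  }
 private:
  std::map<int, Env> targets_;
};
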
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.h b/deps/v8/src/compiler/serializer-for-background-compilation.h
index 0ee37ef280..881ed61a55 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.h
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.h
@@ -5,346 +5,31 @@
#ifndef V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
#define V8_COMPILER_SERIALIZER_FOR_BACKGROUND_COMPILATION_H_
-#include "src/base/optional.h"
-#include "src/compiler/access-info.h"
-#include "src/utils/utils.h"
#include "src/handles/handles.h"
-#include "src/handles/maybe-handles.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
-namespace interpreter {
-class BytecodeArrayIterator;
-} // namespace interpreter
-
-class BytecodeArray;
-class FeedbackVector;
-class LookupIterator;
-class NativeContext;
-class ScriptContextTable;
-class SharedFunctionInfo;
-class SourcePositionTableIterator;
+class BailoutId;
class Zone;
namespace compiler {
-#define CLEAR_ENVIRONMENT_LIST(V) \
- V(Abort) \
- V(CallRuntime) \
- V(CallRuntimeForPair) \
- V(CreateBlockContext) \
- V(CreateEvalContext) \
- V(CreateFunctionContext) \
- V(Debugger) \
- V(PopContext) \
- V(PushContext) \
- V(ResumeGenerator) \
- V(ReThrow) \
- V(StaContextSlot) \
- V(StaCurrentContextSlot) \
- V(SuspendGenerator) \
- V(SwitchOnGeneratorState) \
- V(Throw)
-
-#define CLEAR_ACCUMULATOR_LIST(V) \
- V(Add) \
- V(AddSmi) \
- V(BitwiseAnd) \
- V(BitwiseAndSmi) \
- V(BitwiseNot) \
- V(BitwiseOr) \
- V(BitwiseOrSmi) \
- V(BitwiseXor) \
- V(BitwiseXorSmi) \
- V(CloneObject) \
- V(CreateArrayFromIterable) \
- V(CreateArrayLiteral) \
- V(CreateEmptyArrayLiteral) \
- V(CreateEmptyObjectLiteral) \
- V(CreateMappedArguments) \
- V(CreateObjectLiteral) \
- V(CreateRestParameter) \
- V(CreateUnmappedArguments) \
- V(Dec) \
- V(DeletePropertySloppy) \
- V(DeletePropertyStrict) \
- V(Div) \
- V(DivSmi) \
- V(Exp) \
- V(ExpSmi) \
- V(ForInContinue) \
- V(ForInEnumerate) \
- V(ForInNext) \
- V(ForInStep) \
- V(GetTemplateObject) \
- V(Inc) \
- V(LdaContextSlot) \
- V(LdaCurrentContextSlot) \
- V(LdaImmutableContextSlot) \
- V(LdaImmutableCurrentContextSlot) \
- V(LogicalNot) \
- V(Mod) \
- V(ModSmi) \
- V(Mul) \
- V(MulSmi) \
- V(Negate) \
- V(SetPendingMessage) \
- V(ShiftLeft) \
- V(ShiftLeftSmi) \
- V(ShiftRight) \
- V(ShiftRightLogical) \
- V(ShiftRightLogicalSmi) \
- V(ShiftRightSmi) \
- V(Sub) \
- V(SubSmi) \
- V(TestEqual) \
- V(TestEqualStrict) \
- V(TestGreaterThan) \
- V(TestGreaterThanOrEqual) \
- V(TestInstanceOf) \
- V(TestLessThan) \
- V(TestLessThanOrEqual) \
- V(TestNull) \
- V(TestReferenceEqual) \
- V(TestTypeOf) \
- V(TestUndefined) \
- V(TestUndetectable) \
- V(ToBooleanLogicalNot) \
- V(ToName) \
- V(ToNumber) \
- V(ToNumeric) \
- V(ToString) \
- V(TypeOf)
-
-#define UNCONDITIONAL_JUMPS_LIST(V) \
- V(Jump) \
- V(JumpConstant) \
- V(JumpLoop)
-
-#define CONDITIONAL_JUMPS_LIST(V) \
- V(JumpIfFalse) \
- V(JumpIfFalseConstant) \
- V(JumpIfJSReceiver) \
- V(JumpIfJSReceiverConstant) \
- V(JumpIfNotNull) \
- V(JumpIfNotNullConstant) \
- V(JumpIfNotUndefined) \
- V(JumpIfNotUndefinedConstant) \
- V(JumpIfNull) \
- V(JumpIfNullConstant) \
- V(JumpIfToBooleanFalse) \
- V(JumpIfToBooleanFalseConstant) \
- V(JumpIfToBooleanTrue) \
- V(JumpIfToBooleanTrueConstant) \
- V(JumpIfTrue) \
- V(JumpIfTrueConstant) \
- V(JumpIfUndefined) \
- V(JumpIfUndefinedConstant)
-
-#define IGNORED_BYTECODE_LIST(V) \
- V(CallNoFeedback) \
- V(LdaNamedPropertyNoFeedback) \
- V(StackCheck) \
- V(StaNamedPropertyNoFeedback) \
- V(ThrowReferenceErrorIfHole) \
- V(ThrowSuperAlreadyCalledIfNotHole) \
- V(ThrowSuperNotCalledIfHole)
-
-#define SUPPORTED_BYTECODE_LIST(V) \
- V(CallAnyReceiver) \
- V(CallProperty) \
- V(CallProperty0) \
- V(CallProperty1) \
- V(CallProperty2) \
- V(CallUndefinedReceiver) \
- V(CallUndefinedReceiver0) \
- V(CallUndefinedReceiver1) \
- V(CallUndefinedReceiver2) \
- V(CallWithSpread) \
- V(Construct) \
- V(ConstructWithSpread) \
- V(CreateClosure) \
- V(ExtraWide) \
- V(GetSuperConstructor) \
- V(Illegal) \
- V(LdaConstant) \
- V(LdaFalse) \
- V(LdaGlobal) \
- V(LdaGlobalInsideTypeof) \
- V(LdaKeyedProperty) \
- V(LdaLookupGlobalSlot) \
- V(LdaLookupGlobalSlotInsideTypeof) \
- V(LdaNamedProperty) \
- V(LdaNull) \
- V(Ldar) \
- V(LdaSmi) \
- V(LdaTheHole) \
- V(LdaTrue) \
- V(LdaUndefined) \
- V(LdaZero) \
- V(Mov) \
- V(Return) \
- V(StaGlobal) \
- V(StaInArrayLiteral) \
- V(StaKeyedProperty) \
- V(StaNamedOwnProperty) \
- V(StaNamedProperty) \
- V(Star) \
- V(TestIn) \
- V(Wide) \
- CLEAR_ENVIRONMENT_LIST(V) \
- CLEAR_ACCUMULATOR_LIST(V) \
- CONDITIONAL_JUMPS_LIST(V) \
- UNCONDITIONAL_JUMPS_LIST(V) \
- IGNORED_BYTECODE_LIST(V)
-
+class CompilationDependencies;
class JSHeapBroker;
-template <typename T>
-struct HandleComparator {
- bool operator()(const Handle<T>& lhs, const Handle<T>& rhs) const {
- return lhs.address() < rhs.address();
- }
-};
-
-struct FunctionBlueprint {
- Handle<SharedFunctionInfo> shared;
- Handle<FeedbackVector> feedback_vector;
-
- bool operator<(const FunctionBlueprint& other) const {
- // A feedback vector is never used for more than one SFI, so it can
- // be used for strict ordering of blueprints.
- DCHECK_IMPLIES(feedback_vector.equals(other.feedback_vector),
- shared.equals(other.shared));
- return HandleComparator<FeedbackVector>()(feedback_vector,
- other.feedback_vector);
- }
-};
-
-class CompilationSubject {
- public:
- explicit CompilationSubject(FunctionBlueprint blueprint)
- : blueprint_(blueprint) {}
- CompilationSubject(Handle<JSFunction> closure, Isolate* isolate);
-
- FunctionBlueprint blueprint() const { return blueprint_; }
- MaybeHandle<JSFunction> closure() const { return closure_; }
-
- private:
- FunctionBlueprint blueprint_;
- MaybeHandle<JSFunction> closure_;
-};
-
-using ConstantsSet = ZoneSet<Handle<Object>, HandleComparator<Object>>;
-using MapsSet = ZoneSet<Handle<Map>, HandleComparator<Map>>;
-using BlueprintsSet = ZoneSet<FunctionBlueprint>;
-
-class Hints {
- public:
- explicit Hints(Zone* zone);
-
- const ConstantsSet& constants() const;
- const MapsSet& maps() const;
- const BlueprintsSet& function_blueprints() const;
-
- void AddConstant(Handle<Object> constant);
- void AddMap(Handle<Map> map);
- void AddFunctionBlueprint(FunctionBlueprint function_blueprint);
-
- void Add(const Hints& other);
-
- void Clear();
- bool IsEmpty() const;
-
- private:
- ConstantsSet constants_;
- MapsSet maps_;
- BlueprintsSet function_blueprints_;
-};
-using HintsVector = ZoneVector<Hints>;
-
enum class SerializerForBackgroundCompilationFlag : uint8_t {
kBailoutOnUninitialized = 1 << 0,
kCollectSourcePositions = 1 << 1,
- kOsr = 1 << 2,
+ kAnalyzeEnvironmentLiveness = 1 << 2,
};
using SerializerForBackgroundCompilationFlags =
base::Flags<SerializerForBackgroundCompilationFlag>;
-// The SerializerForBackgroundCompilation makes sure that the relevant function
-// data such as bytecode, SharedFunctionInfo and FeedbackVector, used by later
-// optimizations in the compiler, is copied to the heap broker.
-class SerializerForBackgroundCompilation {
- public:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- Handle<JSFunction> closure,
- SerializerForBackgroundCompilationFlags flags);
- Hints Run(); // NOTE: Returns empty for an already-serialized function.
-
- class Environment;
-
- private:
- SerializerForBackgroundCompilation(
- JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
- CompilationSubject function, base::Optional<Hints> new_target,
- const HintsVector& arguments,
- SerializerForBackgroundCompilationFlags flags);
-
- bool BailoutOnUninitialized(FeedbackSlot slot);
-
- void TraverseBytecode();
-
-#define DECLARE_VISIT_BYTECODE(name, ...) \
- void Visit##name(interpreter::BytecodeArrayIterator* iterator);
- SUPPORTED_BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
-#undef DECLARE_VISIT_BYTECODE
-
- void ProcessCallOrConstruct(Hints callee, base::Optional<Hints> new_target,
- const HintsVector& arguments, FeedbackSlot slot,
- bool with_spread = false);
- void ProcessCallVarArgs(interpreter::BytecodeArrayIterator* iterator,
- ConvertReceiverMode receiver_mode,
- bool with_spread = false);
-
- void ProcessJump(interpreter::BytecodeArrayIterator* iterator);
- void MergeAfterJump(interpreter::BytecodeArrayIterator* iterator);
-
- void ProcessKeyedPropertyAccess(Hints const& receiver, Hints const& key,
- FeedbackSlot slot, AccessMode mode);
- void ProcessNamedPropertyAccess(interpreter::BytecodeArrayIterator* iterator,
- AccessMode mode);
- void ProcessNamedPropertyAccess(Hints const& receiver, NameRef const& name,
- FeedbackSlot slot, AccessMode mode);
-
- GlobalAccessFeedback const* ProcessFeedbackForGlobalAccess(FeedbackSlot slot);
- NamedAccessFeedback const* ProcessFeedbackMapsForNamedAccess(
- const MapHandles& maps, AccessMode mode, NameRef const& name);
- ElementAccessFeedback const* ProcessFeedbackMapsForElementAccess(
- const MapHandles& maps, AccessMode mode);
- void ProcessFeedbackForPropertyAccess(FeedbackSlot slot, AccessMode mode,
- base::Optional<NameRef> static_name);
- void ProcessMapForNamedPropertyAccess(MapRef const& map, NameRef const& name);
-
- Hints RunChildSerializer(CompilationSubject function,
- base::Optional<Hints> new_target,
- const HintsVector& arguments, bool with_spread);
-
- JSHeapBroker* broker() const { return broker_; }
- CompilationDependencies* dependencies() const { return dependencies_; }
- Zone* zone() const { return zone_; }
- Environment* environment() const { return environment_; }
- SerializerForBackgroundCompilationFlags flags() const { return flags_; }
-
- JSHeapBroker* const broker_;
- CompilationDependencies* const dependencies_;
- Zone* const zone_;
- Environment* const environment_;
- ZoneUnorderedMap<int, Environment*> stashed_environments_;
- SerializerForBackgroundCompilationFlags const flags_;
-};
+void RunSerializerForBackgroundCompilation(
+ JSHeapBroker* broker, CompilationDependencies* dependencies, Zone* zone,
+ Handle<JSFunction> closure, SerializerForBackgroundCompilationFlags flags,
+ BailoutId osr_offset);
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index cab398c160..6deba2b002 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -16,6 +16,7 @@ namespace internal {
namespace compiler {
namespace {
+static const int kNumLanes64 = 2;
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
@@ -76,6 +77,8 @@ void SimdScalarLowering::LowerGraph() {
}
}
+#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat)
+
#define FOREACH_INT32X4_OPCODE(V) \
V(I32x4Splat) \
V(I32x4ExtractLane) \
@@ -119,6 +122,8 @@ void SimdScalarLowering::LowerGraph() {
V(S1x16AnyTrue) \
V(S1x16AllTrue)
+#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
+
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
V(F32x4ExtractLane) \
@@ -208,8 +213,12 @@ void SimdScalarLowering::LowerGraph() {
MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
switch (simdType) {
+ case SimdType::kFloat64x2:
+ return MachineType::Float64();
case SimdType::kFloat32x4:
return MachineType::Float32();
+ case SimdType::kInt64x2:
+ return MachineType::Int64();
case SimdType::kInt32x4:
return MachineType::Int32();
case SimdType::kInt16x8:
@@ -223,6 +232,14 @@ MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
+ FOREACH_FLOAT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kFloat64x2;
+ break;
+ }
+ FOREACH_INT64X2_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
+ }
FOREACH_INT32X4_OPCODE(CASE_STMT)
case IrOpcode::kReturn:
case IrOpcode::kParameter:
@@ -326,7 +343,9 @@ static int GetReturnCountAfterLoweringSimd128(
int SimdScalarLowering::NumLanes(SimdType type) {
int num_lanes = 0;
- if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
+ if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) {
+ num_lanes = kNumLanes64;
+ } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
num_lanes = kNumLanes32;
} else if (type == SimdType::kInt16x8) {
num_lanes = kNumLanes16;
@@ -1198,7 +1217,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
-#undef F32x4_UNOP_CASE
+#undef F32X4_UNOP_CASE
case IrOpcode::kF32x4RecipApprox:
case IrOpcode::kF32x4RecipSqrtApprox: {
DCHECK_EQ(1, node->InputCount());
@@ -1223,8 +1242,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
- case IrOpcode::kI32x4Splat:
+ case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
+ case IrOpcode::kI64x2Splat:
+ case IrOpcode::kI32x4Splat:
case IrOpcode::kI16x8Splat:
case IrOpcode::kI8x16Splat: {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1347,7 +1368,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kS8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
- const uint8_t* shuffle = OpParameter<uint8_t*>(node->op());
+ const uint8_t* shuffle = S8x16ShuffleOf(node->op());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_node = zone()->NewArray<Node*>(16);
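
Aside (not part of the patch): the F64x2/I64x2 splat lowering added above reuses the existing pattern of producing one scalar replacement node per lane, with the new 64x2 types simply using two lanes (kNumLanes64). A hedged sketch of that idea with plain values in place of graph nodes:

#include <array>
#include <cstdint>

constexpr int kNumLanes64 = 2;  // F64x2 / I64x2 lower to two scalar lanes.

// Replicate a scalar input into one value per lane, mirroring how a SIMD
// splat node is replaced by num_lanes scalar nodes.
template <typename T, int kLanes>
std::array<T, kLanes> LowerSplat(T value) {
  std::array<T, kLanes> lanes{};
  for (int i = 0; i < kLanes; ++i) lanes[i] = value;
  return lanes;
}

// Usage: auto lanes = LowerSplat<int64_t, kNumLanes64>(42);  // {42, 42}
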
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 01ea195bdc..76723fcc77 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -32,7 +32,14 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };
+ enum class SimdType : uint8_t {
+ kFloat64x2,
+ kFloat32x4,
+ kInt64x2,
+ kInt32x4,
+ kInt16x8,
+ kInt8x16
+ };
#if defined(V8_TARGET_BIG_ENDIAN)
static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8bc0e7af7b..b028a76bb0 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -22,8 +23,8 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/numbers/conversions-inl.h"
-#include "src/utils/address-map.h"
#include "src/objects/objects.h"
+#include "src/utils/address-map.h"
namespace v8 {
namespace internal {
@@ -279,7 +280,8 @@ class RepresentationSelector {
RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
RepresentationChanger* changer,
SourcePositionTable* source_positions,
- NodeOriginTable* node_origins)
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
zone_(zone),
count_(jsgraph->graph()->NodeCount()),
@@ -296,7 +298,8 @@ class RepresentationSelector {
source_positions_(source_positions),
node_origins_(node_origins),
type_cache_(TypeCache::Get()),
- op_typer_(broker, graph_zone()) {
+ op_typer_(broker, graph_zone()),
+ tick_counter_(tick_counter) {
}
// Forward propagation of types from type feedback.
@@ -444,6 +447,7 @@ class RepresentationSelector {
break; \
}
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(Name) \
@@ -747,21 +751,32 @@ class RepresentationSelector {
!GetUpperBound(node->InputAt(1)).Maybe(type);
}
+ void ChangeToDeadValue(Node* node, Node* effect, Node* control) {
+ DCHECK(TypeOf(node).IsNone());
+ // If the node is unreachable, insert an Unreachable node and mark the
+ // value dead.
+ // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
+ // InsertUnreachableIfNecessary.
+ Node* unreachable = effect =
+ graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
+ const Operator* dead_value =
+ jsgraph_->common()->DeadValue(GetInfo(node)->representation());
+ node->ReplaceInput(0, unreachable);
+ node->TrimInputCount(dead_value->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ NodeProperties::ChangeOp(node, dead_value);
+ }
+
void ChangeToPureOp(Node* node, const Operator* new_op) {
DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount());
if (node->op()->EffectInputCount() > 0) {
DCHECK_LT(0, node->op()->ControlInputCount());
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
if (TypeOf(node).IsNone()) {
- // If the node is unreachable, insert an Unreachable node and mark the
- // value dead.
- // TODO(jarin,tebbi) Find a way to unify/merge this insertion with
- // InsertUnreachableIfNecessary.
- Node* unreachable = effect = graph()->NewNode(
- jsgraph_->common()->Unreachable(), effect, control);
- new_op = jsgraph_->common()->DeadValue(GetInfo(node)->representation());
- node->ReplaceInput(0, unreachable);
+ ChangeToDeadValue(node, effect, control);
+ return;
}
// Rewire the effect and control chains.
node->TrimInputCount(new_op->ValueInputCount());
@@ -772,6 +787,30 @@ class RepresentationSelector {
NodeProperties::ChangeOp(node, new_op);
}
+ void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
+ int new_input_index, Node* new_input) {
+ DCHECK(new_op->HasProperty(Operator::kPure));
+ DCHECK_EQ(new_op->ValueInputCount(), 2);
+ DCHECK_EQ(node->op()->ValueInputCount(), 1);
+ DCHECK_LE(0, new_input_index);
+ DCHECK_LE(new_input_index, 1);
+ if (node->op()->EffectInputCount() > 0) {
+ DCHECK_LT(0, node->op()->ControlInputCount());
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ if (TypeOf(node).IsNone()) {
+ ChangeToDeadValue(node, effect, control);
+ return;
+ }
+ node->TrimInputCount(node->op()->ValueInputCount());
+ ReplaceEffectControlUses(node, effect, control);
+ } else {
+ DCHECK_EQ(0, node->op()->ControlInputCount());
+ }
+ node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
+ NodeProperties::ChangeOp(node, new_op);
+ }
+
// Converts input {index} of {node} according to given UseInfo {use},
// assuming the type of the input is {input_type}. If {input_type} is null,
// it takes the input from the input node {TypeOf(node->InputAt(index))}.
@@ -804,6 +843,10 @@ class RepresentationSelector {
}
void ProcessInput(Node* node, int index, UseInfo use) {
+ DCHECK_IMPLIES(use.type_check() != TypeCheckKind::kNone,
+ !node->op()->HasProperty(Operator::kNoDeopt) &&
+ node->op()->EffectInputCount() > 0);
+
switch (phase_) {
case PROPAGATE:
EnqueueInput(node, index, use);
@@ -958,7 +1001,8 @@ class RepresentationSelector {
return MachineRepresentation::kWord32;
} else if (type.Is(Type::Boolean())) {
return MachineRepresentation::kBit;
- } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsFloat64()) {
+ } else if (type.Is(Type::NumberOrOddball()) &&
+ use.TruncatesOddballAndBigIntToNumber()) {
return MachineRepresentation::kFloat64;
} else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
// TODO(turbofan): For Phis that return either NaN or some Smi, it's
@@ -968,6 +1012,8 @@ class RepresentationSelector {
return MachineRepresentation::kTagged;
} else if (type.Is(Type::Number())) {
return MachineRepresentation::kFloat64;
+ } else if (type.Is(Type::BigInt()) && use.IsUsedAsWord64()) {
+ return MachineRepresentation::kWord64;
} else if (type.Is(Type::ExternalPointer())) {
return MachineType::PointerRepresentation();
}
@@ -1109,8 +1155,11 @@ class RepresentationSelector {
if (IsAnyCompressed(rep)) {
return MachineType::AnyCompressed();
}
- // Word64 representation is only valid for safe integer values.
if (rep == MachineRepresentation::kWord64) {
+ if (type.Is(Type::BigInt())) {
+ return MachineType::AnyTagged();
+ }
+
DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
return MachineType(rep, MachineSemantic::kInt64);
}
@@ -1126,7 +1175,17 @@ class RepresentationSelector {
void VisitStateValues(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
- EnqueueInput(node, i, UseInfo::Any());
+ // When lowering 64-bit BigInts to Word64 representation, we have to
+ // make sure they are rematerialized before deoptimization. By
+ // propagating an AnyTagged use, the RepresentationChanger is going to
+ // insert the necessary conversions.
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(node->InputAt(i)).Is(Type::BigInt())) {
+ EnqueueInput(node, i, UseInfo::AnyTagged());
+ } else {
+ EnqueueInput(node, i, UseInfo::Any());
+ }
}
} else if (lower()) {
Zone* zone = jsgraph_->zone();
@@ -1135,6 +1194,12 @@ class RepresentationSelector {
ZoneVector<MachineType>(node->InputCount(), zone);
for (int i = 0; i < node->InputCount(); i++) {
Node* input = node->InputAt(i);
+ // TODO(nicohartmann): Remove, once the deoptimizer can rematerialize
+ // truncated BigInts.
+ if (TypeOf(input).Is(Type::BigInt())) {
+ ProcessInput(node, i, UseInfo::AnyTagged());
+ }
+
(*types)[i] =
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
@@ -1621,6 +1686,8 @@ class RepresentationSelector {
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
+ tick_counter_->DoTick();
+
// Unconditionally eliminate unused pure nodes (only relevant if there's
// a pure operation in between two effectful ones, where the last one
// is unused).
@@ -1715,13 +1782,15 @@ class RepresentationSelector {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumberConvertBigInt:
case IrOpcode::kJSToNumeric: {
+ DCHECK(NodeProperties::GetType(node).Is(Type::Union(
+ Type::BigInt(), Type::NumberOrOddball(), graph()->zone())));
VisitInputs(node);
// TODO(bmeurer): Optimize somewhat based on input type?
if (truncation.IsUsedAsWord32()) {
SetOutput(node, MachineRepresentation::kWord32);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this);
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
SetOutput(node, MachineRepresentation::kFloat64);
if (lower())
lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this);
@@ -2461,6 +2530,20 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kCheckBigInt: {
+ if (InputIs(node, Type::BigInt())) {
+ VisitNoop(node, truncation);
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
+ case IrOpcode::kBigIntAsUintN: {
+ ProcessInput(node, 0, UseInfo::TruncatingWord64());
+ SetOutput(node, MachineRepresentation::kWord64, Type::BigInt());
+ return;
+ }
case IrOpcode::kNumberAcos:
case IrOpcode::kNumberAcosh:
case IrOpcode::kNumberAsin:
@@ -2621,6 +2704,43 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kSpeculativeBigIntAdd: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->machine()->Int64Add());
+ }
+ } else {
+ VisitBinop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ NodeProperties::ChangeOp(node, lowering->simplified()->BigIntAdd());
+ }
+ }
+ return;
+ }
+ case IrOpcode::kSpeculativeBigIntNegate: {
+ if (truncation.IsUsedAsWord64()) {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntTruncatingWord64(VectorSlotPair{}),
+ MachineRepresentation::kWord64);
+ if (lower()) {
+ ChangeUnaryToPureBinaryOp(node, lowering->machine()->Int64Sub(), 0,
+ jsgraph_->Int64Constant(0));
+ }
+ } else {
+ VisitUnop(node,
+ UseInfo::CheckedBigIntAsTaggedPointer(VectorSlotPair{}),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) {
+ ChangeToPureOp(node, lowering->simplified()->BigIntNegate());
+ }
+ }
+ return;
+ }
case IrOpcode::kStringConcat: {
// TODO(turbofan): We currently depend on having this first length input
// to make sure that the overflow check is properly scheduled before the
@@ -2657,6 +2777,10 @@ class RepresentationSelector {
MachineRepresentation::kTaggedPointer);
return;
}
+ case IrOpcode::kStringFromCodePointAt: {
+ return VisitBinop(node, UseInfo::AnyTagged(), UseInfo::Word(),
+ MachineRepresentation::kTaggedPointer);
+ }
case IrOpcode::kStringIndexOf: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessInput(node, 1, UseInfo::AnyTagged());
@@ -2983,7 +3107,7 @@ class RepresentationSelector {
simplified()->PlainPrimitiveToWord32());
}
}
- } else if (truncation.IsUsedAsFloat64()) {
+ } else if (truncation.TruncatesOddballAndBigIntToNumber()) {
if (InputIs(node, Type::NumberOrOddball())) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3236,7 +3360,7 @@ class RepresentationSelector {
// identifies NaN and undefined, we can just pass along
// the {truncation} and completely wipe the {node}.
if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsFloat64()) {
+ if (truncation.TruncatesOddballAndBigIntToNumber()) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -3263,7 +3387,7 @@ class RepresentationSelector {
MachineRepresentation::kWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if (InputIs(node, Type::NumberOrOddball()) &&
- truncation.IsUsedAsFloat64()) {
+ truncation.TruncatesOddballAndBigIntToNumber()) {
// Propagate the Float64 truncation.
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
@@ -3431,6 +3555,9 @@ class RepresentationSelector {
return SetOutput(node, MachineRepresentation::kNone);
case IrOpcode::kStaticAssert:
return VisitUnop(node, UseInfo::Any(), MachineRepresentation::kTagged);
+ case IrOpcode::kAssertType:
+ return VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTagged);
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
@@ -3534,6 +3661,7 @@ class RepresentationSelector {
NodeOriginTable* node_origins_;
TypeCache const* type_cache_;
OperationTyper op_typer_; // helper for the feedback typer
+ TickCounter* const tick_counter_;
NodeInfo* GetInfo(Node* node) {
DCHECK(node->id() < count_);
@@ -3547,19 +3675,22 @@ SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
Zone* zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level)
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level) {}
+ poisoning_level_(poisoning_level),
+ tick_counter_(tick_counter) {}
void SimplifiedLowering::LowerAllNodes() {
- RepresentationChanger changer(jsgraph(), jsgraph()->isolate());
+ RepresentationChanger changer(jsgraph(), broker_);
RepresentationSelector selector(jsgraph(), broker_, zone_, &changer,
- source_positions_, node_origins_);
+ source_positions_, node_origins_,
+ tick_counter_);
selector.Run(this);
}
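
Aside (not part of the patch): a small, hedged sketch of the arithmetic the kBigIntAsUintN lowering above boils down to once its input has been truncated to a machine word — the value is reduced modulo 2^bits, assuming 0 <= bits <= 64 as the operator builder checks.

#include <cstdint>

// Reduce a 64-bit value modulo 2^bits (bits in [0, 64]).
uint64_t AsUintN(int bits, uint64_t value) {
  if (bits == 0) return 0;
  if (bits == 64) return value;                 // Full word: no masking.
  return value & ((uint64_t{1} << bits) - 1);   // Keep the low |bits| bits.
}

// Example: AsUintN(8, 0x1FF) == 0xFF.
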
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index e434af9d4f..414e3588d7 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -12,6 +12,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -26,7 +29,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level);
+ PoisoningMitigationLevel poisoning_level,
+ TickCounter* tick_counter);
~SimplifiedLowering() = default;
void LowerAllNodes();
@@ -67,6 +71,8 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
PoisoningMitigationLevel poisoning_level_;
+ TickCounter* const tick_counter_;
+
Node* Float64Round(Node* const node);
Node* Float64Sign(Node* const node);
Node* Int32Abs(Node* const node);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index ed3cfa8617..4f83635422 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -492,6 +492,18 @@ Handle<Map> FastMapParameterOf(const Operator* op) {
return Handle<Map>::null();
}
+std::ostream& operator<<(std::ostream& os, BigIntOperationHint hint) {
+ switch (hint) {
+ case BigIntOperationHint::kBigInt:
+ return os << "BigInt";
+ }
+ UNREACHABLE();
+}
+
+size_t hash_value(BigIntOperationHint hint) {
+ return static_cast<uint8_t>(hint);
+}
+
std::ostream& operator<<(std::ostream& os, NumberOperationHint hint) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
@@ -585,12 +597,6 @@ Type AllocateTypeOf(const Operator* op) {
return AllocateParametersOf(op).type();
}
-UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kStringFromSingleCodePoint ||
- op->opcode() == IrOpcode::kStringCodePointAt);
- return OpParameter<UnicodeEncoding>(op);
-}
-
AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
return static_cast<AbortReason>(OpParameter<int>(op));
@@ -702,9 +708,11 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
V(StringConcat, Operator::kNoProperties, 3, 0) \
V(StringToNumber, Operator::kNoProperties, 1, 0) \
V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
@@ -713,6 +721,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
@@ -723,6 +732,7 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeCompressedToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToCompressedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
@@ -730,6 +740,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
@@ -769,9 +781,12 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(NewConsString, Operator::kNoProperties, 3, 0) \
V(PoisonIndex, Operator::kNoProperties, 1, 0)
-#define EFFECT_DEPENDENT_OP_LIST(V) \
- V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
- V(StringSubstring, Operator::kNoProperties, 3, 1) \
+#define EFFECT_DEPENDENT_OP_LIST(V) \
+ V(BigIntAdd, Operator::kNoProperties, 2, 1) \
+ V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringFromCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(StringSubstring, Operator::kNoProperties, 3, 1) \
V(DateNow, Operator::kNoProperties, 0, 1)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -801,6 +816,8 @@ bool operator==(CheckMinusZeroParameters const& lhs,
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
+ V(CheckBigInt, 1, 1) \
+ V(CheckedInt32ToCompressedSigned, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedInt64ToInt32, 1, 1) \
V(CheckedInt64ToTaggedSigned, 1, 1) \
@@ -895,32 +912,6 @@ struct SimplifiedOperatorGlobalCache final {
DEOPTIMIZE_REASON_LIST(CHECK_IF)
#undef CHECK_IF
- template <UnicodeEncoding kEncoding>
- struct StringCodePointAtOperator final : public Operator1<UnicodeEncoding> {
- StringCodePointAtOperator()
- : Operator1<UnicodeEncoding>(IrOpcode::kStringCodePointAt,
- Operator::kFoldable | Operator::kNoThrow,
- "StringCodePointAt", 2, 1, 1, 1, 1, 0,
- kEncoding) {}
- };
- StringCodePointAtOperator<UnicodeEncoding::UTF16>
- kStringCodePointAtOperatorUTF16;
- StringCodePointAtOperator<UnicodeEncoding::UTF32>
- kStringCodePointAtOperatorUTF32;
-
- template <UnicodeEncoding kEncoding>
- struct StringFromSingleCodePointOperator final
- : public Operator1<UnicodeEncoding> {
- StringFromSingleCodePointOperator()
- : Operator1<UnicodeEncoding>(
- IrOpcode::kStringFromSingleCodePoint, Operator::kPure,
- "StringFromSingleCodePoint", 1, 0, 0, 1, 0, 0, kEncoding) {}
- };
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF16>
- kStringFromSingleCodePointOperatorUTF16;
- StringFromSingleCodePointOperator<UnicodeEncoding::UTF32>
- kStringFromSingleCodePointOperatorUTF32;
-
struct FindOrderedHashMapEntryOperator final : public Operator {
FindOrderedHashMapEntryOperator()
: Operator(IrOpcode::kFindOrderedHashMapEntry, Operator::kEliminatable,
@@ -1236,6 +1227,20 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
static_cast<int>(reason)); // parameter
}
+const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) {
+ CHECK(0 <= bits && bits <= 64);
+
+ return new (zone()) Operator1<int>(IrOpcode::kBigIntAsUintN, Operator::kPure,
+ "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits);
+}
+
+const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
+ DCHECK(type.IsRange());
+ return new (zone()) Operator1<Type>(IrOpcode::kAssertType,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "AssertType", 1, 0, 0, 1, 0, 0, type);
+}
+
const Operator* SimplifiedOperatorBuilder::CheckIf(
DeoptimizeReason reason, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1433,6 +1438,21 @@ const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
CheckFloat64HoleParameters(mode, feedback));
}
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAdd(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntAdd, Operator::kFoldable | Operator::kNoThrow,
+ "SpeculativeBigIntAdd", 2, 1, 1, 1, 1, 0, hint);
+}
+
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntNegate(
+ BigIntOperationHint hint) {
+ return new (zone()) Operator1<BigIntOperationHint>(
+ IrOpcode::kSpeculativeBigIntNegate,
+ Operator::kFoldable | Operator::kNoThrow, "SpeculativeBigIntNegate", 1, 1,
+ 1, 1, 1, 0, hint);
+}
+
const Operator* SimplifiedOperatorBuilder::SpeculativeToNumber(
NumberOperationHint hint, const VectorSlotPair& feedback) {
if (!feedback.IsValid()) {
@@ -1655,28 +1675,6 @@ const Operator* SimplifiedOperatorBuilder::AllocateRaw(
AllocateParameters(type, allocation, allow_large_objects));
}
-const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringCodePointAtOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringCodePointAtOperatorUTF32;
- }
- UNREACHABLE();
-}
-
-const Operator* SimplifiedOperatorBuilder::StringFromSingleCodePoint(
- UnicodeEncoding encoding) {
- switch (encoding) {
- case UnicodeEncoding::UTF16:
- return &cache_.kStringFromSingleCodePointOperatorUTF16;
- case UnicodeEncoding::UTF32:
- return &cache_.kStringFromSingleCodePointOperatorUTF32;
- }
- UNREACHABLE();
-}
-
#define SPECULATIVE_NUMBER_BINOP(Name) \
const Operator* SimplifiedOperatorBuilder::Name(NumberOperationHint hint) { \
switch (hint) { \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d93544c5cd..bdac796adf 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -475,10 +475,15 @@ enum class NumberOperationHint : uint8_t {
kNumberOrOddball, // Inputs were Number or Oddball, output was Number.
};
+enum class BigIntOperationHint : uint8_t {
+ kBigInt,
+};
+
size_t hash_value(NumberOperationHint);
+size_t hash_value(BigIntOperationHint);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
-
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BigIntOperationHint);
V8_EXPORT_PRIVATE NumberOperationHint NumberOperationHintOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
@@ -634,6 +639,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberSilenceNaN();
+ const Operator* BigIntAdd();
+ const Operator* BigIntNegate();
+
const Operator* SpeculativeSafeIntegerAdd(NumberOperationHint hint);
const Operator* SpeculativeSafeIntegerSubtract(NumberOperationHint hint);
@@ -653,6 +661,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeNumberLessThanOrEqual(NumberOperationHint hint);
const Operator* SpeculativeNumberEqual(NumberOperationHint hint);
+ const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint);
+ const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint);
+ const Operator* BigIntAsUintN(int bits);
+
const Operator* ReferenceEqual();
const Operator* SameValue();
const Operator* SameValueNumbersOnly();
@@ -666,9 +678,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
const Operator* StringCharCodeAt();
- const Operator* StringCodePointAt(UnicodeEncoding encoding);
+ const Operator* StringCodePointAt();
const Operator* StringFromSingleCharCode();
- const Operator* StringFromSingleCodePoint(UnicodeEncoding encoding);
+ const Operator* StringFromSingleCodePoint();
+ const Operator* StringFromCodePointAt();
const Operator* StringIndexOf();
const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
@@ -686,6 +699,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* PlainPrimitiveToWord32();
const Operator* PlainPrimitiveToFloat64();
+ const Operator* ChangeCompressedSignedToInt32();
const Operator* ChangeTaggedSignedToInt32();
const Operator* ChangeTaggedSignedToInt64();
const Operator* ChangeTaggedToInt32();
@@ -695,6 +709,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeTaggedToTaggedSigned();
const Operator* ChangeCompressedToTaggedSigned();
const Operator* ChangeTaggedToCompressedSigned();
+ const Operator* ChangeInt31ToCompressedSigned();
const Operator* ChangeInt31ToTaggedSigned();
const Operator* ChangeInt32ToTagged();
const Operator* ChangeInt64ToTagged();
@@ -704,6 +719,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ChangeFloat64ToTaggedPointer();
const Operator* ChangeTaggedToBit();
const Operator* ChangeBitToTagged();
+ const Operator* TruncateBigIntToUint64();
+ const Operator* ChangeUint64ToBigInt();
const Operator* TruncateTaggedToWord32();
const Operator* TruncateTaggedToFloat64();
const Operator* TruncateTaggedToBit();
@@ -740,6 +757,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckedInt32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToCompressedSigned(
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToInt32(const VectorSlotPair& feedback);
const Operator* CheckedInt64ToTaggedSigned(const VectorSlotPair& feedback);
@@ -752,6 +771,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckBigInt(const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedPointer(
const VectorSlotPair& feedback);
const Operator* CheckedCompressedToTaggedSigned(
@@ -874,6 +894,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// Abort (for terminating execution on internal error).
const Operator* RuntimeAbort(AbortReason reason);
+ // Abort if the value input does not inhabit the given type.
+ const Operator* AssertType(Type type);
+
const Operator* DateNow();
private:
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index c00613c232..2bb5a0a4b5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -329,9 +329,7 @@ void StateValuesAccess::iterator::Pop() {
current_depth_--;
}
-
-bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
-
+bool StateValuesAccess::iterator::done() const { return current_depth_ < 0; }
void StateValuesAccess::iterator::Advance() {
Top()->Advance();
@@ -392,14 +390,12 @@ MachineType StateValuesAccess::iterator::type() {
}
}
-
-bool StateValuesAccess::iterator::operator!=(iterator& other) {
+bool StateValuesAccess::iterator::operator!=(iterator const& other) {
// We only allow comparison with end().
CHECK(other.done());
return !done();
}
-
StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
Advance();
return *this;
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 00ec3bb351..0ff5d218f1 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -92,7 +92,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
class V8_EXPORT_PRIVATE iterator {
public:
// Bare minimum of operators needed for range iteration.
- bool operator!=(iterator& other);
+ bool operator!=(iterator const& other);
iterator& operator++();
TypedNode operator*();
@@ -104,7 +104,7 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
Node* node();
MachineType type();
- bool done();
+ bool done() const;
void Advance();
void EnsureValid();
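
The StateValuesAccess iterator changes are pure const-correctness: operator!= now takes a const reference and done() is const, which is what comparison against a shared end() sentinel wants. A standalone sketch of the same shape (not V8's StateValuesAccess, just the comparison-with-end() idiom):

#include <cassert>

// A forward-only cursor that, like StateValuesAccess::iterator, only supports
// comparison against the end() iterator.
class Cursor {
 public:
  explicit Cursor(int remaining) : remaining_(remaining) {}
  bool done() const { return remaining_ < 0; }
  // Taking a const reference lets a const end() cursor appear on the RHS.
  bool operator!=(Cursor const& other) const {
    assert(other.done());  // we only allow comparison with end()
    return !done();
  }
  Cursor& operator++() { --remaining_; return *this; }
  int operator*() const { return remaining_; }

 private:
  int remaining_;
};

int main() {
  int count = 0;
  for (Cursor it(2), end(-1); it != end; ++it) ++count;
  assert(count == 3);  // visits remaining_ == 2, 1, 0
}
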
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 13d8199745..b71bcd7e66 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -6,6 +6,7 @@
#include "src/compiler/store-store-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -129,7 +130,8 @@ namespace {
class RedundantStoreFinder final {
public:
- RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone);
+ RedundantStoreFinder(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
void Find();
@@ -157,6 +159,7 @@ class RedundantStoreFinder final {
ZoneSet<Node*>& to_remove() { return to_remove_; }
JSGraph* const jsgraph_;
+ TickCounter* const tick_counter_;
Zone* const temp_zone_;
ZoneStack<Node*> revisit_;
@@ -199,6 +202,7 @@ void RedundantStoreFinder::Find() {
Visit(jsgraph()->graph()->end());
while (!revisit_.empty()) {
+ tick_counter_->DoTick();
Node* next = revisit_.top();
revisit_.pop();
DCHECK_LT(next->id(), in_revisit_.size());
@@ -230,9 +234,10 @@ bool RedundantStoreFinder::HasBeenVisited(Node* node) {
return !unobservable_for_id(node->id()).IsUnvisited();
}
-void StoreStoreElimination::Run(JSGraph* js_graph, Zone* temp_zone) {
+void StoreStoreElimination::Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone) {
// Find superfluous nodes
- RedundantStoreFinder finder(js_graph, temp_zone);
+ RedundantStoreFinder finder(js_graph, tick_counter, temp_zone);
finder.Find();
// Remove superfluous nodes
@@ -336,8 +341,11 @@ bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
}
// Initialize unobservable_ with js_graph->graph->NodeCount() empty sets.
-RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph, Zone* temp_zone)
+RedundantStoreFinder::RedundantStoreFinder(JSGraph* js_graph,
+ TickCounter* tick_counter,
+ Zone* temp_zone)
: jsgraph_(js_graph),
+ tick_counter_(tick_counter),
temp_zone_(temp_zone),
revisit_(temp_zone),
in_revisit_(js_graph->graph()->NodeCount(), temp_zone),
diff --git a/deps/v8/src/compiler/store-store-elimination.h b/deps/v8/src/compiler/store-store-elimination.h
index cda7591fcc..646640a310 100644
--- a/deps/v8/src/compiler/store-store-elimination.h
+++ b/deps/v8/src/compiler/store-store-elimination.h
@@ -11,11 +11,15 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
class StoreStoreElimination final {
public:
- static void Run(JSGraph* js_graph, Zone* temp_zone);
+ static void Run(JSGraph* js_graph, TickCounter* tick_counter,
+ Zone* temp_zone);
};
} // namespace compiler
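
Threading a TickCounter through RedundantStoreFinder matches the other phases touched in this update: the fixpoint worklist calls DoTick() once per iteration so a long-running pass is accounted for. A minimal sketch of that shape, assuming nothing about V8's TickCounter beyond a DoTick() hook:

#include <cstdint>
#include <cstdio>
#include <queue>

// Stand-in for a tick counter: the only contract used here is DoTick().
class TickCounter {
 public:
  void DoTick() { ++ticks_; }
  uint64_t ticks() const { return ticks_; }

 private:
  uint64_t ticks_ = 0;
};

// A worklist-driven fixpoint that ticks once per processed item, mirroring how
// RedundantStoreFinder::Find() ticks once per revisited node.
void RunFixpoint(std::queue<int> worklist, TickCounter* tick_counter) {
  while (!worklist.empty()) {
    tick_counter->DoTick();
    int item = worklist.front();
    worklist.pop();
    if (item > 0) worklist.push(item - 1);  // pretend the item spawns more work
  }
}

int main() {
  std::queue<int> work;
  work.push(3);
  TickCounter ticks;
  RunFixpoint(work, &ticks);
  std::printf("ticks: %llu\n", static_cast<unsigned long long>(ticks.ticks()));
}
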
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 4cf2c38bdb..5dbbad3dcd 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include "src/base/flags.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
@@ -33,13 +34,15 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
+Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter)
: flags_(flags),
graph_(graph),
decorator_(nullptr),
cache_(TypeCache::Get()),
broker_(broker),
- operation_typer_(broker, zone()) {
+ operation_typer_(broker, zone()),
+ tick_counter_(tick_counter) {
singleton_false_ = operation_typer_.singleton_false();
singleton_true_ = operation_typer_.singleton_true();
@@ -47,7 +50,6 @@ Typer::Typer(JSHeapBroker* broker, Flags flags, Graph* graph)
graph_->AddDecorator(decorator_);
}
-
Typer::~Typer() {
graph_->RemoveDecorator(decorator_);
}
@@ -91,14 +93,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return UpdateType(node, TypeBinaryOp(node, x));
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return UpdateType(node, TypeUnaryOp(node, x));
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -157,14 +163,18 @@ class Typer::Visitor : public Reducer {
case IrOpcode::k##x: \
return TypeBinaryOp(node, x);
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
return TypeUnaryOp(node, x);
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_CASE)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) case IrOpcode::k##x:
@@ -276,14 +286,18 @@ class Typer::Visitor : public Reducer {
return t->operation_typer_.Name(type); \
}
SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_UNOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
#define DECLARE_METHOD(Name) \
static Type Name(Type lhs, Type rhs, Typer* t) { \
return t->operation_typer_.Name(lhs, rhs); \
}
SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_BIGINT_BINOP_LIST(DECLARE_METHOD)
SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
+ SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
static Type ObjectIsArrayBufferView(Type, Typer*);
@@ -410,7 +424,7 @@ void Typer::Run(const NodeVector& roots,
induction_vars->ChangeToInductionVariablePhis();
}
Visitor visitor(this, induction_vars);
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter_);
graph_reducer.AddReducer(&visitor);
for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
@@ -798,6 +812,8 @@ Type Typer::Visitor::TypeHeapConstant(Node* node) {
return TypeConstant(HeapConstantOf(node->op()));
}
+Type Typer::Visitor::TypeCompressedHeapConstant(Node* node) { UNREACHABLE(); }
+
Type Typer::Visitor::TypeExternalConstant(Node* node) {
return Type::ExternalPointer();
}
@@ -2060,6 +2076,10 @@ Type Typer::Visitor::TypeStringFromSingleCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromSingleCodePointTyper);
}
+Type Typer::Visitor::TypeStringFromCodePointAt(Node* node) {
+ return Type::String();
+}
+
Type Typer::Visitor::TypeStringIndexOf(Node* node) {
return Type::Range(-1.0, String::kMaxLength, zone());
}
@@ -2336,6 +2356,8 @@ Type Typer::Visitor::TypeFindOrderedHashMapEntryForInt32Key(Node* node) {
Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); }
+
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index fa87d81f1e..305470d724 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -11,6 +11,9 @@
namespace v8 {
namespace internal {
+
+class TickCounter;
+
namespace compiler {
// Forward declarations.
@@ -25,7 +28,8 @@ class V8_EXPORT_PRIVATE Typer {
};
using Flags = base::Flags<Flag>;
- Typer(JSHeapBroker* broker, Flags flags, Graph* graph);
+ Typer(JSHeapBroker* broker, Flags flags, Graph* graph,
+ TickCounter* tick_counter);
~Typer();
void Run();
@@ -49,6 +53,7 @@ class V8_EXPORT_PRIVATE Typer {
TypeCache const* cache_;
JSHeapBroker* broker_;
OperationTyper operation_typer_;
+ TickCounter* const tick_counter_;
Type singleton_false_;
Type singleton_true_;
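
The typer picks up the new BigInt operators purely by expanding the additional X-macro lists (SIMPLIFIED_BIGINT_BINOP_LIST and friends) at each dispatch site. A small self-contained sketch of that pattern, with hypothetical list and opcode names:

#include <cassert>
#include <string>

// Hypothetical operation lists in the style of SIMPLIFIED_*_LIST(V).
#define NUMBER_BINOP_LIST(V) V(NumberAdd) V(NumberSubtract)
#define BIGINT_BINOP_LIST(V) V(BigIntAdd) V(BigIntSubtract)

enum class Opcode {
#define DECLARE_ENUM(Name) k##Name,
  NUMBER_BINOP_LIST(DECLARE_ENUM)
  BIGINT_BINOP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

// Every consumer that expands both lists automatically covers the new opcodes.
std::string OpcodeName(Opcode op) {
  switch (op) {
#define DECLARE_CASE(Name) case Opcode::k##Name: return #Name;
    NUMBER_BINOP_LIST(DECLARE_CASE)
    BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
  }
  return "unknown";
}

int main() {
  assert(OpcodeName(Opcode::kBigIntAdd) == "BigIntAdd");
}
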
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index edf07a4ffd..d4267a75fe 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -6,9 +6,10 @@
#include "src/compiler/types.h"
-#include "src/utils/ostreams.h"
#include "src/handles/handles-inl.h"
+#include "src/objects/instance-type.h"
#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -202,7 +203,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
return kOtherObject;
case JS_ARRAY_TYPE:
return kArray;
- case JS_VALUE_TYPE:
+ case JS_PRIMITIVE_WRAPPER_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
#ifdef V8_INTL_SUPPORT
@@ -312,8 +313,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case SCRIPT_TYPE:
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
- case MODULE_TYPE:
- case MODULE_INFO_ENTRY_TYPE:
+ case SOURCE_TEXT_MODULE_TYPE:
+ case SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE:
+ case SYNTHETIC_MODULE_TYPE:
case CELL_TYPE:
case PREPARSE_DATA_TYPE:
case UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE:
@@ -349,6 +351,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case ENUM_CACHE_TYPE:
case SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE:
case WASM_CAPI_FUNCTION_DATA_TYPE:
+ case WASM_INDIRECT_FUNCTION_TABLE_TYPE:
case WASM_DEBUG_INFO_TYPE:
case WASM_EXCEPTION_TAG_TYPE:
case WASM_EXPORTED_FUNCTION_DATA_TYPE:
@@ -363,6 +366,9 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case PROMISE_REJECT_REACTION_JOB_TASK_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE:
case FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE:
+#define MAKE_TORQUE_CLASS_TYPE(V) case V:
+ TORQUE_DEFINED_INSTANCE_TYPES(MAKE_TORQUE_CLASS_TYPE)
+#undef MAKE_TORQUE_CLASS_TYPE
UNREACHABLE();
}
UNREACHABLE();
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index 21aaab5036..0dc1aa77b0 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
-#include "src/compiler/js-heap-broker.h"
+#include "src/compiler/heap-refs.h"
#include "src/handles/handles.h"
#include "src/numbers/conversions.h"
#include "src/objects/objects.h"
@@ -220,6 +220,7 @@ namespace compiler {
INTERNAL_BITSET_TYPE_LIST(V) \
PROPER_BITSET_TYPE_LIST(V)
+class JSHeapBroker;
class HeapConstantType;
class OtherNumberConstantType;
class TupleType;
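
Swapping the js-heap-broker.h include for heap-refs.h plus a forward declaration of JSHeapBroker is a compile-time decoupling: types.h only refers to JSHeapBroker through pointers and references, so a declaration suffices and the heavy header stays in the translation units that actually call into the broker. A generic sketch of the idiom (file names and classes are illustrative only, collapsed into one translation unit for brevity):

// widget.h -- only needs to name Broker, never to look inside it.
class Broker;  // forward declaration: enough for pointers and references

class Widget {
 public:
  explicit Widget(Broker* broker) : broker_(broker) {}
  Broker* broker() const { return broker_; }

 private:
  Broker* broker_;  // an incomplete type is fine for a pointer data member
};

// widget.cc -- code that dereferences Broker includes the full definition.
class Broker {
 public:
  int Answer() const { return 42; }
};

int UseWidget(const Widget& w) { return w.broker()->Answer(); }

int main() {
  Broker b;
  Widget w(&b);
  return UseWidget(w) == 42 ? 0 : 1;
}
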
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 3f1b2e9f13..d3d4d54ea2 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -431,6 +431,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Number());
break;
case IrOpcode::kHeapConstant:
+ case IrOpcode::kCompressedHeapConstant:
// Constants have no inputs.
CHECK_EQ(0, input_count);
// Type is anything.
@@ -933,7 +934,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kDebugAbort:
+ case IrOpcode::kAbortCSAAssert:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
@@ -975,6 +976,25 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kSpeculativeBigIntAdd:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kSpeculativeBigIntNegate:
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAsUintN:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntAdd:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckValueInputIs(node, 1, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kBigIntNegate:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kNumberAdd:
case IrOpcode::kNumberSubtract:
case IrOpcode::kNumberMultiply:
@@ -1156,6 +1176,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kStringFromCodePointAt:
+      // (String, Unsigned32) -> String
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringIndexOf:
// (String, String, SignedSmall) -> SignedSmall
CheckValueInputIs(node, 0, Type::String());
@@ -1306,6 +1332,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckNotTyped(node);
break;
+ case IrOpcode::kChangeCompressedSignedToInt32:
case IrOpcode::kChangeTaggedSignedToInt32: {
// Signed32 /\ Tagged -> Signed32 /\ UntaggedInt32
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1360,6 +1387,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kChangeInt31ToCompressedSigned:
case IrOpcode::kChangeInt31ToTaggedSigned: {
// Signed31 /\ UntaggedInt32 -> Signed31 /\ Tagged
// TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1429,6 +1457,14 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// CheckTypeIs(node, to));
break;
}
+ case IrOpcode::kTruncateBigIntToUint64:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
+ case IrOpcode::kChangeUint64ToBigInt:
+ CheckValueInputIs(node, 0, Type::BigInt());
+ CheckTypeIs(node, Type::BigInt());
+ break;
case IrOpcode::kTruncateTaggedToBit:
case IrOpcode::kTruncateTaggedPointerToBit:
break;
@@ -1498,6 +1534,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedUint32Div:
case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedInt32Mul:
+ case IrOpcode::kCheckedInt32ToCompressedSigned:
case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedInt64ToInt32:
case IrOpcode::kCheckedInt64ToTaggedSigned:
@@ -1520,6 +1557,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kCheckedTaggedToCompressedSigned:
case IrOpcode::kCheckedTaggedToCompressedPointer:
case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kAssertType:
break;
case IrOpcode::kCheckFloat64Hole:
@@ -1619,6 +1657,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CHECK_EQ(0, value_count);
CheckTypeIs(node, Type::Number());
break;
+ case IrOpcode::kCheckBigInt:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::BigInt());
+ break;
// Machine operators
// -----------------------
@@ -1755,6 +1797,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kBitcastInt32ToFloat32:
case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kBitcastTaggedToWord:
+ case IrOpcode::kBitcastTaggedSignedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kBitcastWordToTaggedSigned:
case IrOpcode::kChangeInt32ToInt64:
@@ -1800,6 +1843,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
+ case IrOpcode::kMemoryBarrier:
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicExchange:
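
The new BigInt cases in the verifier follow the established pattern: assert that each value input inhabits Type::BigInt() and that the node's own type is BigInt. A toy version of that check, using a deliberately simplified type lattice instead of V8's Type:

#include <cassert>
#include <vector>

// A tiny stand-in for the verifier's type lattice.
enum class Ty { kAny, kNumber, kBigInt };

struct Node {
  Ty type;
  std::vector<Node*> inputs;
};

// "Is" on this toy lattice: everything inhabits Any; otherwise types must match.
bool Is(Ty t, Ty expected) { return expected == Ty::kAny || t == expected; }

void CheckValueInputIs(const Node* node, int i, Ty expected) {
  assert(Is(node->inputs[i]->type, expected));
}

void CheckTypeIs(const Node* node, Ty expected) {
  assert(Is(node->type, expected));
}

// Mirrors the shape of the kBigIntAdd case: both inputs and the result BigInt.
void CheckBigIntAdd(const Node* node) {
  CheckValueInputIs(node, 0, Ty::kBigInt);
  CheckValueInputIs(node, 1, Ty::kBigInt);
  CheckTypeIs(node, Ty::kBigInt);
}

int main() {
  Node lhs{Ty::kBigInt, {}}, rhs{Ty::kBigInt, {}};
  Node add{Ty::kBigInt, {&lhs, &rhs}};
  CheckBigIntAdd(&add);
}
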
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 3396214e58..2da7177ece 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -14,6 +14,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/compiler.h"
#include "src/codegen/interface-descriptors.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -276,8 +277,9 @@ Node* WasmGraphBuilder::EffectPhi(unsigned count, Node** effects,
}
Node* WasmGraphBuilder::RefNull() {
- return LOAD_INSTANCE_FIELD(NullValue,
- MachineType::TypeCompressedTaggedPointer());
+ Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(
+ isolate_root, IsolateData::root_slot_offset(RootIndex::kNullValue));
}
Node* WasmGraphBuilder::RefFunc(uint32_t function_index) {
@@ -2195,8 +2197,8 @@ Node* WasmGraphBuilder::Throw(uint32_t exception_index,
graph()->NewNode(m->I32x4ExtractLane(3), value));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
STORE_FIXED_ARRAY_SLOT_ANY(values_array, index, value);
++index;
break;
@@ -2334,8 +2336,8 @@ Node** WasmGraphBuilder::GetExceptionValues(
BuildDecodeException32BitValue(values_array, &index));
break;
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
value = LOAD_FIXED_ARRAY_SLOT_ANY(values_array, index);
++index;
break;
@@ -2853,25 +2855,69 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
Node** args, Node*** rets,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, rets, position, kCallContinues);
- }
return BuildIndirectCall(table_index, sig_index, args, rets, position,
kCallContinues);
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
+void WasmGraphBuilder::LoadIndirectFunctionTable(uint32_t table_index,
+ Node** ift_size,
+ Node** ift_sig_ids,
+ Node** ift_targets,
+ Node** ift_instances) {
+ if (table_index == 0) {
+ *ift_size =
+ LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ *ift_sig_ids = LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds,
+ MachineType::Pointer());
+ *ift_targets = LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets,
+ MachineType::Pointer());
+ *ift_instances = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
+ return;
+ }
+
+ Node* ift_tables = LOAD_INSTANCE_FIELD(
+ IndirectFunctionTables, MachineType::TypeCompressedTaggedPointer());
+ Node* ift_table = LOAD_FIXED_ARRAY_SLOT_ANY(ift_tables, table_index);
+
+ *ift_size = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSizeOffset),
+ MachineType::Int32());
+
+ *ift_sig_ids = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kSigIdsOffset),
+ MachineType::Pointer());
+
+ *ift_targets = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kTargetsOffset),
+ MachineType::Pointer());
+
+ *ift_instances = LOAD_RAW(
+ ift_table,
+ wasm::ObjectAccess::ToTagged(WasmIndirectFunctionTable::kRefsOffset),
+ MachineType::TypeCompressedTaggedPointer());
+}
+
+Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
+ uint32_t sig_index, Node** args,
Node*** rets,
wasm::WasmCodePosition position,
IsReturnCall continuation) {
DCHECK_NOT_NULL(args[0]);
DCHECK_NOT_NULL(env_);
- // Assume only one table for now.
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
+ // First we have to load the table.
+ Node* ift_size;
+ Node* ift_sig_ids;
+ Node* ift_targets;
+ Node* ift_instances;
+ LoadIndirectFunctionTable(table_index, &ift_size, &ift_sig_ids, &ift_targets,
+ &ift_instances);
- Node* ift_size =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSize, MachineType::Uint32());
+ wasm::FunctionSig* sig = env_->module->signatures[sig_index];
MachineOperatorBuilder* machine = mcgraph()->machine();
Node* key = args[0];
@@ -2894,9 +2940,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
// Load signature from the table and check.
- Node* ift_sig_ids =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableSigIds, MachineType::Pointer());
-
int32_t expected_sig_id = env_->module->signature_ids[sig_index];
Node* int32_scaled_key = Uint32ToUintptr(
graph()->NewNode(machine->Word32Shl(), key, Int32Constant(2)));
@@ -2909,11 +2952,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- Node* ift_targets =
- LOAD_INSTANCE_FIELD(IndirectFunctionTableTargets, MachineType::Pointer());
- Node* ift_instances = LOAD_INSTANCE_FIELD(
- IndirectFunctionTableRefs, MachineType::TypeCompressedTaggedPointer());
-
Node* tagged_scaled_key;
if (kTaggedSize == kInt32Size) {
tagged_scaled_key = int32_scaled_key;
@@ -2955,48 +2993,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t sig_index, Node** args,
}
}
-Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
- uint32_t sig_index, Node** args,
- Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation) {
- DCHECK_NOT_NULL(args[0]);
- Node* entry_index = args[0];
- DCHECK_NOT_NULL(env_);
- BoundsCheckTable(table_index, entry_index, position, wasm::kTrapFuncInvalid,
- nullptr);
-
- DCHECK(Smi::IsValid(table_index));
- DCHECK(Smi::IsValid(sig_index));
- Node* runtime_args[]{
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_index)),
- BuildChangeUint31ToSmi(entry_index),
- graph()->NewNode(mcgraph()->common()->NumberConstant(sig_index))};
-
- Node* target_instance = BuildCallToRuntime(
- Runtime::kWasmIndirectCallCheckSignatureAndGetTargetInstance,
- runtime_args, arraysize(runtime_args));
-
- // We reuse the runtime_args array here, even though we only need the first
- // two arguments.
- Node* call_target = BuildCallToRuntime(
- Runtime::kWasmIndirectCallGetTargetAddress, runtime_args, 2);
-
- wasm::FunctionSig* sig = env_->module->signatures[sig_index];
- args[0] = call_target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
- switch (continuation) {
- case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
- case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
- }
-}
-
Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
wasm::WasmCodePosition position) {
DCHECK_NULL(args[0]);
@@ -3019,9 +3015,6 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, Node** args,
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
uint32_t sig_index, Node** args,
wasm::WasmCodePosition position) {
- if (table_index == 0) {
- return BuildIndirectCall(sig_index, args, nullptr, position, kReturnCall);
- }
return BuildIndirectCall(table_index, sig_index, args, nullptr, position,
kReturnCall);
}
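
BuildIndirectCall now receives the table index and loads size, signature ids, targets and refs for that specific table (from the instance for table 0, otherwise from the WasmIndirectFunctionTable object), but the dispatch sequence itself is unchanged: bounds-check the key, compare the canonical signature id, then call the target. A standalone sketch of that sequence with plain arrays standing in for the table fields:

#include <cassert>
#include <cstdint>
#include <vector>

using WasmFn = int64_t (*)(int64_t);

// Stand-ins for the per-table fields loaded by LoadIndirectFunctionTable.
struct IndirectFunctionTable {
  std::vector<uint32_t> sig_ids;  // canonicalized signature ids
  std::vector<WasmFn> targets;    // entry points
};

enum class Trap { kNone, kFuncInvalid, kFuncSigMismatch };

// The dispatch sequence of an indirect call: bounds check, signature check,
// then the call through the table entry.
int64_t CallIndirect(const IndirectFunctionTable& table, uint32_t key,
                     uint32_t expected_sig_id, int64_t arg, Trap* trap) {
  if (key >= table.targets.size()) {            // trap: function index invalid
    *trap = Trap::kFuncInvalid;
    return 0;
  }
  if (table.sig_ids[key] != expected_sig_id) {  // trap: signature mismatch
    *trap = Trap::kFuncSigMismatch;
    return 0;
  }
  *trap = Trap::kNone;
  return table.targets[key](arg);
}

int64_t Square(int64_t x) { return x * x; }

int main() {
  IndirectFunctionTable table{{7}, {&Square}};
  Trap trap;
  assert(CallIndirect(table, 0, 7, 5, &trap) == 25 && trap == Trap::kNone);
  CallIndirect(table, 0, 8, 5, &trap);
  assert(trap == Trap::kFuncSigMismatch);
}
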
@@ -3324,13 +3317,6 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
return result;
}
-Node* WasmGraphBuilder::BuildLoadBuiltinFromInstance(int builtin_index) {
- DCHECK(Builtins::IsBuiltinId(builtin_index));
- Node* isolate_root = LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
- return LOAD_TAGGED_POINTER(isolate_root,
- IsolateData::builtin_slot_offset(builtin_index));
-}
-
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded js_context.
Node* WasmGraphBuilder::BuildCallToRuntimeWithContext(
@@ -3492,7 +3478,7 @@ void WasmGraphBuilder::GetTableBaseAndOffset(uint32_t table_index,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0)));
}
-Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
+Node* WasmGraphBuilder::TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3501,7 +3487,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
return LOAD_RAW_NODE_OFFSET(base, offset,
MachineType::TypeCompressedTagged());
}
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableGetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -3521,7 +3507,7 @@ Node* WasmGraphBuilder::GetTable(uint32_t table_index, Node* index,
Effect(), Control())));
}
-Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
+Node* WasmGraphBuilder::TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position) {
if (env_->module->tables[table_index].type == wasm::kWasmAnyRef) {
Node* base = nullptr;
@@ -3530,7 +3516,7 @@ Node* WasmGraphBuilder::SetTable(uint32_t table_index, Node* index, Node* val,
return STORE_RAW_NODE_OFFSET(
base, offset, val, MachineRepresentation::kTagged, kFullWriteBarrier);
} else {
- // We access anyfunc tables through runtime calls.
+ // We access funcref tables through runtime calls.
WasmTableSetDescriptor interface_descriptor;
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), // zone
@@ -4000,6 +3986,30 @@ Node* WasmGraphBuilder::S128Zero() {
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Splat(), inputs[0]);
+ case wasm::kExprF64x2Abs:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Abs(), inputs[0]);
+ case wasm::kExprF64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Neg(), inputs[0]);
+ case wasm::kExprF64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Lt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Le:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF64x2Gt:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Lt(), inputs[1],
+ inputs[0]);
+ case wasm::kExprF64x2Ge:
+ return graph()->NewNode(mcgraph()->machine()->F64x2Le(), inputs[1],
+ inputs[0]);
case wasm::kExprF32x4Splat:
return graph()->NewNode(mcgraph()->machine()->F32x4Splat(), inputs[0]);
case wasm::kExprF32x4SConvertI32x4:
@@ -4054,6 +4064,49 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprF32x4Ge:
return graph()->NewNode(mcgraph()->machine()->F32x4Le(), inputs[1],
inputs[0]);
+ case wasm::kExprI64x2Splat:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Splat(), inputs[0]);
+ case wasm::kExprI64x2Neg:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Neg(), inputs[0]);
+ case wasm::kExprI64x2Add:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Sub:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Mul:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Mul(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Eq:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Eq(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2Ne:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Ne(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeS(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2LtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2LeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[1],
+ inputs[0]);
+ case wasm::kExprI64x2GtU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GtU(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI64x2GeU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2GeU(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(mcgraph()->machine()->I32x4Splat(), inputs[0]);
case wasm::kExprI32x4SConvertF32x4:
@@ -4305,6 +4358,10 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
case wasm::kExprS128Select:
return graph()->NewNode(mcgraph()->machine()->S128Select(), inputs[2],
inputs[0], inputs[1]);
+ case wasm::kExprS1x2AnyTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AnyTrue(), inputs[0]);
+ case wasm::kExprS1x2AllTrue:
+ return graph()->NewNode(mcgraph()->machine()->S1x2AllTrue(), inputs[0]);
case wasm::kExprS1x4AnyTrue:
return graph()->NewNode(mcgraph()->machine()->S1x4AnyTrue(), inputs[0]);
case wasm::kExprS1x4AllTrue:
@@ -4326,12 +4383,24 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->F64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ExtractLane(lane),
inputs[0]);
case wasm::kExprF32x4ReplaceLane:
return graph()->NewNode(mcgraph()->machine()->F32x4ReplaceLane(lane),
inputs[0], inputs[1]);
+ case wasm::kExprI64x2ExtractLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI64x2ReplaceLane:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4ExtractLane:
return graph()->NewNode(mcgraph()->machine()->I32x4ExtractLane(lane),
inputs[0]);
@@ -4359,6 +4428,14 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
Node* const* inputs) {
has_simd_ = true;
switch (opcode) {
+ case wasm::kExprI64x2Shl:
+ return graph()->NewNode(mcgraph()->machine()->I64x2Shl(shift), inputs[0]);
+ case wasm::kExprI64x2ShrS:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrS(shift),
+ inputs[0]);
+ case wasm::kExprI64x2ShrU:
+ return graph()->NewNode(mcgraph()->machine()->I64x2ShrU(shift),
+ inputs[0]);
case wasm::kExprI32x4Shl:
return graph()->NewNode(mcgraph()->machine()->I32x4Shl(shift), inputs[0]);
case wasm::kExprI32x4ShrS:
@@ -4612,6 +4689,11 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
return SetEffect(node);
}
+Node* WasmGraphBuilder::AtomicFence() {
+ return SetEffect(graph()->NewNode(mcgraph()->machine()->MemBarrier(),
+ Effect(), Control()));
+}
+
#undef ATOMIC_BINOP_LIST
#undef ATOMIC_CMP_EXCHG_LIST
#undef ATOMIC_LOAD_LIST
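
Several of the new F64x2 and I64x2 comparisons in the SIMD hunks above are lowered by reusing the mirrored machine operator with swapped inputs (for example, kExprF64x2Gt becomes F64x2Lt(inputs[1], inputs[0])). The identity is simply that a > b is the same as b < a, applied lane-wise; a scalar sketch of the rewrite:

#include <cassert>

// The only "machine operators" this sketch assumes: less-than and
// less-than-or-equal.
bool Lt(double a, double b) { return a < b; }
bool Le(double a, double b) { return a <= b; }

// Greater-than and greater-or-equal are lowered by swapping the operands,
// exactly like the F64x2Gt/F64x2Ge cases reuse F64x2Lt/F64x2Le.
bool Gt(double a, double b) { return Lt(b, a); }
bool Ge(double a, double b) { return Le(b, a); }

int main() {
  assert(Gt(2.0, 1.0) && !Gt(1.0, 2.0) && !Gt(1.0, 1.0));
  assert(Ge(1.0, 1.0) && Ge(2.0, 1.0) && !Ge(1.0, 2.0));
}
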
@@ -4636,8 +4718,19 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
CheckDataSegmentIsPassiveAndNotDropped(data_segment_index, position);
- Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
auto m = mcgraph()->machine();
+ auto common = mcgraph()->common();
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+
+ Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
Node* seg_index = Uint32Constant(data_segment_index);
Node* src_fail;
@@ -4679,9 +4772,16 @@ Node* WasmGraphBuilder::MemoryInit(uint32_t data_segment_index, Node* dst,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
@@ -4699,16 +4799,19 @@ Node* WasmGraphBuilder::DataDrop(uint32_t data_segment_index,
Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
wasm::WasmCodePosition position) {
auto m = mcgraph()->machine();
- // The data must be copied backward if the regions overlap and src < dst. The
- // regions overlap if {src + size > dst && dst + size > src}. Since we already
- // test that {src < dst}, we know that {dst + size > src}, so this simplifies
- // to just {src + size > dst}. That sum can overflow, but if we subtract
- // {size} from both sides of the inequality we get the equivalent test
- // {size > dst - src}.
- Node* copy_backward = graph()->NewNode(
- m->Word32And(), graph()->NewNode(m->Uint32LessThan(), src, dst),
- graph()->NewNode(m->Uint32LessThan(),
- graph()->NewNode(m->Int32Sub(), dst, src), size));
+ auto common = mcgraph()->common();
+ // If size == 0, then memory.copy is a no-op.
+ Node* size_null_check =
+ graph()->NewNode(m->Word32Equal(), size, mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
+ // The data must be copied backward if src < dst.
+ Node* copy_backward = graph()->NewNode(m->Uint32LessThan(), src, dst);
Node* dst_fail = BoundsCheckMemRange(&dst, &size, position);
@@ -4728,13 +4831,32 @@ Node* WasmGraphBuilder::MemoryCopy(Node* dst, Node* src, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, src, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds,
- graph()->NewNode(m->Word32Or(), dst_fail, src_fail),
- position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds,
+ graph()->NewNode(m->Word32Or(), dst_fail, src_fail), position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
wasm::WasmCodePosition position) {
+ auto machine = mcgraph()->machine();
+ auto common = mcgraph()->common();
+  // If size == 0, then memory.fill is a no-op.
+ Node* size_null_check = graph()->NewNode(machine->Word32Equal(), size,
+ mcgraph()->Int32Constant(0));
+ Node* size_null_branch = graph()->NewNode(common->Branch(BranchHint::kFalse),
+ size_null_check, Control());
+
+ Node* size_null_etrue = Effect();
+ Node* size_null_if_false =
+ graph()->NewNode(common->IfFalse(), size_null_branch);
+ SetControl(size_null_if_false);
Node* fail = BoundsCheckMemRange(&dst, &size, position);
Node* function = graph()->NewNode(mcgraph()->common()->ExternalConstant(
ExternalReference::wasm_memory_fill()));
@@ -4742,7 +4864,15 @@ Node* WasmGraphBuilder::MemoryFill(Node* dst, Node* value, Node* size,
MachineType::Uint32()};
MachineSignature sig(0, 3, sig_types);
BuildCCall(&sig, function, dst, value, size);
- return TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ TrapIfTrue(wasm::kTrapMemOutOfBounds, fail, position);
+ Node* size_null_if_true =
+ graph()->NewNode(common->IfTrue(), size_null_branch);
+
+ Node* merge = SetControl(
+ graph()->NewNode(common->Merge(2), size_null_if_true, Control()));
+ SetEffect(
+ graph()->NewNode(common->EffectPhi(2), size_null_etrue, Effect(), merge));
+ return merge;
}
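
MemoryInit, MemoryCopy and MemoryFill now branch on size == 0 before the bounds check, so a zero-length operation skips the trap path entirely and both branches merge afterwards. The control shape reduces to an early-out; a sketch over a plain byte vector:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

enum class Result { kOk, kTrapMemOutOfBounds };

// memory.fill with the size == 0 fast path: an empty fill skips the bounds
// check; a non-empty fill bounds-checks dst..dst+size before writing.
Result MemoryFill(std::vector<uint8_t>* memory, uint64_t dst, uint8_t value,
                  uint64_t size) {
  if (size == 0) return Result::kOk;  // the IfTrue branch: straight to the merge
  if (dst > memory->size() || size > memory->size() - dst) {
    return Result::kTrapMemOutOfBounds;
  }
  std::memset(memory->data() + dst, value, static_cast<size_t>(size));
  return Result::kOk;
}

int main() {
  std::vector<uint8_t> mem(16, 0);
  assert(MemoryFill(&mem, 1000, 0xAB, 0) == Result::kOk);  // empty: no trap
  assert(MemoryFill(&mem, 8, 0xAB, 16) == Result::kTrapMemOutOfBounds);
  assert(MemoryFill(&mem, 0, 0xAB, 16) == Result::kOk && mem[15] == 0xAB);
}
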
Node* WasmGraphBuilder::CheckElemSegmentIsPassiveAndNotDropped(
@@ -4789,13 +4919,13 @@ Node* WasmGraphBuilder::ElemDrop(uint32_t elem_segment_index,
mcgraph()->Int32Constant(1), Effect(), Control()));
}
-Node* WasmGraphBuilder::TableCopy(uint32_t table_src_index,
- uint32_t table_dst_index, Node* dst,
+Node* WasmGraphBuilder::TableCopy(uint32_t table_dst_index,
+ uint32_t table_src_index, Node* dst,
Node* src, Node* size,
wasm::WasmCodePosition position) {
Node* args[] = {
- graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
graph()->NewNode(mcgraph()->common()->NumberConstant(table_dst_index)),
+ graph()->NewNode(mcgraph()->common()->NumberConstant(table_src_index)),
BuildConvertUint32ToSmiWithSaturation(dst, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(src, FLAG_wasm_max_table_size),
BuildConvertUint32ToSmiWithSaturation(size, FLAG_wasm_max_table_size)};
@@ -4878,28 +5008,6 @@ void WasmGraphBuilder::RemoveBytecodePositionDecorator() {
}
namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(4, 5)
-void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
- Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(tag, AbstractCode::cast(*code), *name_str));
-}
-
class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* sig,
@@ -4914,12 +5022,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildAllocateHeapNumberWithValue(Node* value, Node* control) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
- Node* target = (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
- ? mcgraph()->RelocatableIntPtrConstant(
- wasm::WasmCode::kWasmAllocateHeapNumber,
- RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(
- BUILTIN_CODE(isolate_, AllocateHeapNumber));
+ Node* target =
+ (stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
+ ? mcgraph()->RelocatableIntPtrConstant(
+ wasm::WasmCode::kWasmAllocateHeapNumber,
+ RelocInfo::WASM_STUB_CALL)
+ : BuildLoadBuiltinFromInstance(Builtins::kAllocateHeapNumber);
if (!allocate_heap_number_operator_.is_set()) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
mcgraph()->zone(), AllocateHeapNumberDescriptor(), 0,
@@ -4956,6 +5064,34 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return mcgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
+ Node* BuildLoadUndefinedValueFromInstance() {
+ if (undefined_value_node_ == nullptr) {
+ Node* isolate_root = graph()->NewNode(
+ mcgraph()->machine()->Load(MachineType::Pointer()),
+ instance_node_.get(),
+ mcgraph()->Int32Constant(WASM_INSTANCE_OBJECT_OFFSET(IsolateRoot)),
+ graph()->start(), graph()->start());
+ undefined_value_node_ = InsertDecompressionIfNeeded(
+ MachineType::TypeCompressedTaggedPointer(),
+ graph()->NewNode(
+ mcgraph()->machine()->Load(
+ MachineType::TypeCompressedTaggedPointer()),
+ isolate_root,
+ mcgraph()->Int32Constant(
+ IsolateData::root_slot_offset(RootIndex::kUndefinedValue)),
+ isolate_root, graph()->start()));
+ }
+ return undefined_value_node_.get();
+ }
+
+ Node* BuildLoadBuiltinFromInstance(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ Node* isolate_root =
+ LOAD_INSTANCE_FIELD(IsolateRoot, MachineType::Pointer());
+ return LOAD_TAGGED_POINTER(isolate_root,
+ IsolateData::builtin_slot_offset(builtin_index));
+ }
+
Node* BuildChangeInt32ToTagged(Node* value) {
MachineOperatorBuilder* machine = mcgraph()->machine();
CommonOperatorBuilder* common = mcgraph()->common();
@@ -5096,7 +5232,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmToNumber, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, ToNumber));
+ : BuildLoadBuiltinFromInstance(Builtins::kToNumber);
Node* result = SetEffect(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), stub_code,
@@ -5126,8 +5262,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
SetControl(is_heap_object.if_true);
Node* orig_effect = Effect();
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* check_undefined =
graph()->NewNode(machine->WordEqual(), value, undefined_node);
Node* effect_tagged = Effect();
@@ -5173,8 +5308,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
case wasm::kWasmAnyRef:
- case wasm::kWasmAnyFunc:
- case wasm::kWasmExceptRef:
+ case wasm::kWasmFuncRef:
+ case wasm::kWasmExnRef:
return node;
default:
UNREACHABLE();
@@ -5196,7 +5331,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmI64ToBigInt, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, I64ToBigInt));
+ : BuildLoadBuiltinFromInstance(Builtins::kI64ToBigInt);
return SetEffect(
SetControl(graph()->NewNode(mcgraph()->common()->Call(call_descriptor),
@@ -5218,7 +5353,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
(stub_mode_ == StubCallMode::kCallWasmRuntimeStub)
? mcgraph()->RelocatableIntPtrConstant(
wasm::WasmCode::kWasmBigIntToI64, RelocInfo::WASM_STUB_CALL)
- : jsgraph()->HeapConstant(BUILTIN_CODE(isolate_, BigIntToI64));
+ : BuildLoadBuiltinFromInstance(Builtins::kBigIntToI64);
return SetEffect(SetControl(
graph()->NewNode(mcgraph()->common()->Call(call_descriptor), target,
@@ -5228,15 +5363,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* FromJS(Node* node, Node* js_context, wasm::ValueType type) {
DCHECK_NE(wasm::kWasmStmt, type);
- // The parameter is of type anyref or except_ref, we take it as is.
- if (type == wasm::kWasmAnyRef || type == wasm::kWasmExceptRef) {
+ // The parameter is of type anyref or exnref, we take it as is.
+ if (type == wasm::kWasmAnyRef || type == wasm::kWasmExnRef) {
return node;
}
- if (type == wasm::kWasmAnyFunc) {
+ if (type == wasm::kWasmFuncRef) {
Node* check =
BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidAnyFuncValue, js_context, &node, 1, effect_,
+ Runtime::kWasmIsValidFuncRefValue, js_context, &node, 1, effect_,
Control())));
Diamond type_check(graph(), mcgraph()->common(), check,
@@ -5471,8 +5606,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The callable is passed as the last parameter, after WASM arguments.
Node* callable_node = Param(wasm_count + 1);
- Node* undefined_node = LOAD_INSTANCE_FIELD(
- UndefinedValue, MachineType::TypeCompressedTaggedPointer());
+ Node* undefined_node = BuildLoadUndefinedValueFromInstance();
Node* call = nullptr;
bool sloppy_receiver = true;
@@ -5811,22 +5945,26 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
void BuildCWasmEntry() {
- // Build the start and the JS parameter nodes.
- SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 5)));
+ // +1 offset for first parameter index being -1.
+ SetEffect(SetControl(Start(CWasmEntryParameters::kNumParameters + 1)));
- // Create parameter nodes (offset by 1 for the receiver parameter).
- Node* code_entry = Param(CWasmEntryParameters::kCodeEntry + 1);
- Node* object_ref_node = Param(CWasmEntryParameters::kObjectRef + 1);
- Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer + 1);
+ Node* code_entry = Param(CWasmEntryParameters::kCodeEntry);
+ Node* object_ref = Param(CWasmEntryParameters::kObjectRef);
+ Node* arg_buffer = Param(CWasmEntryParameters::kArgumentsBuffer);
+ Node* c_entry_fp = Param(CWasmEntryParameters::kCEntryFp);
+
+ Node* fp_value = graph()->NewNode(mcgraph()->machine()->LoadFramePointer());
+ STORE_RAW(fp_value, TypedFrameConstants::kFirstPushedFrameValueOffset,
+ c_entry_fp, MachineType::PointerRepresentation(),
+ kNoWriteBarrier);
int wasm_arg_count = static_cast<int>(sig_->parameter_count());
- int arg_count =
- wasm_arg_count + 4; // code, object_ref_node, control, effect
+ int arg_count = wasm_arg_count + 4; // code, object_ref, control, effect
Node** args = Buffer(arg_count);
int pos = 0;
args[pos++] = code_entry;
- args[pos++] = object_ref_node;
+ args[pos++] = object_ref;
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -5847,26 +5985,43 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* call = SetEffect(graph()->NewNode(
mcgraph()->common()->Call(call_descriptor), arg_count, args));
- // Store the return value.
- DCHECK_GE(1, sig_->return_count());
- if (sig_->return_count() == 1) {
+ Node* if_success = graph()->NewNode(mcgraph()->common()->IfSuccess(), call);
+ Node* if_exception =
+ graph()->NewNode(mcgraph()->common()->IfException(), call, call);
+
+ // Handle exception: return it.
+ SetControl(if_exception);
+ Return(if_exception);
+
+ // Handle success: store the return value(s).
+ SetControl(if_success);
+ pos = 0;
+ offset = 0;
+ for (wasm::ValueType type : sig_->returns()) {
StoreRepresentation store_rep(
- wasm::ValueTypes::MachineRepresentationFor(sig_->GetReturn()),
- kNoWriteBarrier);
+ wasm::ValueTypes::MachineRepresentationFor(type), kNoWriteBarrier);
+ Node* value = sig_->return_count() == 1
+ ? call
+ : graph()->NewNode(mcgraph()->common()->Projection(pos),
+ call, Control());
SetEffect(graph()->NewNode(mcgraph()->machine()->Store(store_rep),
- arg_buffer, Int32Constant(0), call, Effect(),
- Control()));
+ arg_buffer, Int32Constant(offset), value,
+ Effect(), Control()));
+ offset += wasm::ValueTypes::ElementSizeInBytes(type);
+ pos++;
}
+
Return(jsgraph()->SmiConstant(0));
if (mcgraph()->machine()->Is32() && ContainsInt64(sig_)) {
MachineRepresentation sig_reps[] = {
- MachineRepresentation::kWord32, // return value
- MachineRepresentation::kTagged, // receiver
- MachineRepresentation::kTagged, // arg0 (code)
- MachineRepresentation::kTagged // arg1 (buffer)
+ MachineType::PointerRepresentation(), // return value
+ MachineType::PointerRepresentation(), // target
+ MachineRepresentation::kTagged, // object_ref
+ MachineType::PointerRepresentation(), // argv
+ MachineType::PointerRepresentation() // c_entry_fp
};
- Signature<MachineRepresentation> c_entry_sig(1, 2, sig_reps);
+ Signature<MachineRepresentation> c_entry_sig(1, 4, sig_reps);
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(),
mcgraph()->common(), mcgraph()->zone(), &c_entry_sig);
r.LowerGraph();
@@ -5879,6 +6034,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Isolate* const isolate_;
JSGraph* jsgraph_;
StubCallMode stub_mode_;
+ SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
wasm::WasmFeatures enabled_features_;
};
@@ -5901,27 +6057,25 @@ void AppendSignature(char* buffer, size_t max_name_len,
} // namespace
-MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
- wasm::FunctionSig* sig,
- bool is_import) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompileJSToWasmWrapper");
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import) {
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -5929,73 +6083,66 @@ MaybeHandle<Code> CompileJSToWasmWrapper(Isolate* isolate,
builder.BuildJSToWasmWrapper(is_import);
//----------------------------------------------------------------------------
- // Run the compilation pipeline.
+ // Create the compilation job.
//----------------------------------------------------------------------------
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "js_to_wasm:";
- AppendSignature(debug_name, kMaxNameLen, sig);
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "js_to_wasm:", 12);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
- // Schedule and compile to machine code.
int params = static_cast<int>(sig->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, params + 1, CallDescriptor::kNoFlags);
+ zone.get(), false, params + 1, CallDescriptor::kNoFlags);
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::JS_TO_WASM_FUNCTION, debug_name,
- WasmAssemblerOptions());
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
-
- if (must_record_function_compilation(isolate)) {
- RecordFunctionCompilation(CodeEventListener::STUB_TAG, isolate, code, "%s",
- debug_name);
- }
-
- return code;
+ return Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::JS_TO_WASM_FUNCTION,
+ std::move(debug_name), WasmAssemblerOptions());
}
-WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
- wasm::FunctionSig* expected_sig,
- bool has_bigint_feature) {
- if (WasmExportedFunction::IsWasmExportedFunction(*target)) {
- auto imported_function = WasmExportedFunction::cast(*target);
- auto func_index = imported_function.function_index();
- auto module = imported_function.instance().module();
+std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
+ Handle<JSReceiver> callable, wasm::FunctionSig* expected_sig,
+ bool has_bigint_feature) {
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ auto imported_function = Handle<WasmExportedFunction>::cast(callable);
+ auto func_index = imported_function->function_index();
+ auto module = imported_function->instance().module();
wasm::FunctionSig* imported_sig = module->functions[func_index].sig;
if (*imported_sig != *expected_sig) {
- return WasmImportCallKind::kLinkError;
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- if (static_cast<uint32_t>(func_index) < module->num_imported_functions) {
- // TODO(wasm): this redirects all imported-reexported functions
- // through the call builtin. Fall through to JS function cases below?
- return WasmImportCallKind::kUseCallBuiltin;
+ if (static_cast<uint32_t>(func_index) >= module->num_imported_functions) {
+ return std::make_pair(WasmImportCallKind::kWasmToWasm, callable);
}
- return WasmImportCallKind::kWasmToWasm;
- }
- if (WasmCapiFunction::IsWasmCapiFunction(*target)) {
- WasmCapiFunction capi_function = WasmCapiFunction::cast(*target);
- if (!capi_function.IsSignatureEqual(expected_sig)) {
- return WasmImportCallKind::kLinkError;
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ Handle<WasmInstanceObject> instance(imported_function->instance(), isolate);
+ ImportedFunctionEntry entry(instance, func_index);
+ callable = handle(entry.callable(), isolate);
+ }
+ if (WasmJSFunction::IsWasmJSFunction(*callable)) {
+ auto js_function = Handle<WasmJSFunction>::cast(callable);
+ if (!js_function->MatchesSignature(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
+ }
+ Isolate* isolate = callable->GetIsolate();
+ // Resolve the short-cut to the underlying callable and continue.
+ callable = handle(js_function->GetCallable(), isolate);
+ }
+ if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
+ auto capi_function = Handle<WasmCapiFunction>::cast(callable);
+ if (!capi_function->IsSignatureEqual(expected_sig)) {
+ return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
- return WasmImportCallKind::kWasmToCapi;
+ return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
}
// Assuming we are calling to JS, check whether this would be a runtime error.
if (!wasm::IsJSCompatibleSignature(expected_sig, has_bigint_feature)) {
- return WasmImportCallKind::kRuntimeTypeError;
+ return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
}
// For JavaScript calls, determine whether the target has an arity match
// and whether it has a sloppy receiver.
- if (target->IsJSFunction()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(target);
+ if (callable->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
SharedFunctionInfo shared = function->shared();
// Check for math intrinsics.
@@ -6004,7 +6151,9 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
wasm::FunctionSig* sig = wasm::WasmOpcodes::Signature(wasm::kExpr##name); \
if (!sig) sig = wasm::WasmOpcodes::AsmjsSignature(wasm::kExpr##name); \
DCHECK_NOT_NULL(sig); \
- if (*expected_sig == *sig) return WasmImportCallKind::k##name; \
+ if (*expected_sig == *sig) { \
+ return std::make_pair(WasmImportCallKind::k##name, callable); \
+ } \
}
#define COMPARE_SIG_FOR_BUILTIN_F64(name) \
case Builtins::kMath##name: \
@@ -6051,19 +6200,23 @@ WasmImportCallKind GetWasmImportCallKind(Handle<JSReceiver> target,
if (IsClassConstructor(shared.kind())) {
// Class constructor will throw anyway.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
bool sloppy = is_sloppy(shared.language_mode()) && !shared.native();
if (shared.internal_formal_parameter_count() ==
expected_sig->parameter_count()) {
- return sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
- : WasmImportCallKind::kJSFunctionArityMatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMatch,
+ callable);
}
- return sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
- : WasmImportCallKind::kJSFunctionArityMismatch;
+ return std::make_pair(
+ sloppy ? WasmImportCallKind::kJSFunctionArityMismatchSloppy
+ : WasmImportCallKind::kJSFunctionArityMismatch,
+ callable);
}
// Unknown case. Use the call builtin.
- return WasmImportCallKind::kUseCallBuiltin;
+ return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
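
The resolution routine above now returns both the wrapper kind and the callable it ultimately resolved to, so callers no longer unwrap re-exported functions themselves. A minimal caller-side sketch, assuming local names such as js_callable, expected_sig and has_bigint_feature (only ResolveWasmImportCall itself comes from this patch):

  std::pair<compiler::WasmImportCallKind, Handle<JSReceiver>> resolved =
      compiler::ResolveWasmImportCall(js_callable, expected_sig,
                                      has_bigint_feature);
  compiler::WasmImportCallKind kind = resolved.first;
  // The second element is the ultimate target after unwrapping the
  // WasmExportedFunction / WasmJSFunction short-cuts.
  Handle<JSReceiver> target = resolved.second;
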
wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
@@ -6103,10 +6256,9 @@ wasm::WasmOpcode GetMathIntrinsicOpcode(WasmImportCallKind kind,
#undef CASE
}
-wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig) {
+wasm::WasmCompilationResult CompileWasmMathIntrinsic(
+ wasm::WasmEngine* wasm_engine, WasmImportCallKind kind,
+ wasm::FunctionSig* sig) {
DCHECK_EQ(1, sig->return_count());
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6125,7 +6277,7 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
InstructionSelector::AlignmentRequirements()));
wasm::CompilationEnv env(
- native_module->module(), wasm::UseTrapHandler::kNoTrapHandler,
+ nullptr, wasm::UseTrapHandler::kNoTrapHandler,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
wasm::kAllWasmFeatures, wasm::LowerSimd::kNoLowerSimd);
@@ -6167,21 +6319,12 @@ wasm::WasmCode* CompileWasmMathIntrinsic(wasm::WasmEngine* wasm_engine,
wasm_engine, call_descriptor, mcgraph, Code::WASM_FUNCTION,
wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kFunction,
- wasm::ExecutionTier::kNone);
- // TODO(titzer): add counters for math intrinsic code size / allocation
- return native_module->PublishCode(std::move(wasm_code));
+ return result;
}
-wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
- wasm::NativeModule* native_module,
- WasmImportCallKind kind,
- wasm::FunctionSig* sig,
- bool source_positions) {
+wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine* wasm_engine, wasm::CompilationEnv* env,
+ WasmImportCallKind kind, wasm::FunctionSig* sig, bool source_positions) {
DCHECK_NE(WasmImportCallKind::kLinkError, kind);
DCHECK_NE(WasmImportCallKind::kWasmToWasm, kind);
@@ -6189,7 +6332,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
if (FLAG_wasm_math_intrinsics &&
kind >= WasmImportCallKind::kFirstMathIntrinsic &&
kind <= WasmImportCallKind::kLastMathIntrinsic) {
- return CompileWasmMathIntrinsic(wasm_engine, native_module, kind, sig);
+ return CompileWasmMathIntrinsic(wasm_engine, kind, sig);
}
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
@@ -6214,7 +6357,7 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, source_position_table,
StubCallMode::kCallWasmRuntimeStub,
- native_module->enabled_features());
+ env->enabled_features);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmImportCallWrapper(kind);
@@ -6232,13 +6375,8 @@ wasm::WasmCode* CompileWasmImportCallWrapper(wasm::WasmEngine* wasm_engine,
wasm_engine, incoming, &jsgraph, Code::WASM_TO_JS_FUNCTION,
wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
source_position_table);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
- std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
- wasm::ExecutionTier::kNone);
- return native_module->PublishCode(std::move(wasm_code));
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ return result;
}
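
CompileWasmImportCallWrapper now hands back a WasmCompilationResult instead of publishing code itself, so adding and publishing the wrapper becomes the caller's job. A hedged sketch of that caller side, mirroring the AddCode/PublishCode sequence the C-API wrapper below still performs (env, native_module and the other locals are assumed to exist, not taken from this patch):

  wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
      wasm_engine, &env, kind, expected_sig, /*source_positions=*/false);
  std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
      wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
      result.tagged_parameter_slots, std::move(result.protected_instructions),
      std::move(result.source_positions), wasm::WasmCode::kWasmToJsWrapper,
      wasm::ExecutionTier::kNone);
  wasm::WasmCode* published = native_module->PublishCode(std::move(wasm_code));
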
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
@@ -6290,9 +6428,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::WasmCode::kAnonymousFuncIndex, result.code_desc,
- result.frame_slot_count, result.tagged_parameter_slots,
- std::move(result.protected_instructions),
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots, std::move(result.protected_instructions),
std::move(result.source_positions), wasm::WasmCode::kWasmToCapiWrapper,
wasm::ExecutionTier::kNone);
return native_module->PublishCode(std::move(wasm_code));
@@ -6338,24 +6475,26 @@ wasm::WasmCompilationResult CompileWasmInterpreterEntry(
wasm::WasmCode::kInterpreterEntry, func_name.begin(),
WasmStubAssemblerOptions());
result.result_tier = wasm::ExecutionTier::kInterpreter;
+ result.kind = wasm::WasmCompilationResult::kInterpreterEntry;
return result;
}
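
The compilation result now also records what kind of artifact was produced; this patch sets kWasmToJsWrapper for import wrappers and kInterpreterEntry here. An illustrative consumer could branch on that field (the switch itself is a sketch; only the enum values shown appear in this patch):

  switch (result.kind) {
    case wasm::WasmCompilationResult::kInterpreterEntry:
      // Publish as an interpreter entry stub.
      break;
    case wasm::WasmCompilationResult::kWasmToJsWrapper:
      // Publish as an import call wrapper.
      break;
    default:
      // Regular wasm function code.
      break;
  }
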
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
- Zone zone(isolate->allocator(), ZONE_NAME);
- Graph graph(&zone);
- CommonOperatorBuilder common(&zone);
+ std::unique_ptr<Zone> zone =
+ base::make_unique<Zone>(isolate->allocator(), ZONE_NAME);
+ Graph* graph = new (zone.get()) Graph(zone.get());
+ CommonOperatorBuilder common(zone.get());
MachineOperatorBuilder machine(
- &zone, MachineType::PointerRepresentation(),
+ zone.get(), MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements());
- JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+ JSGraph jsgraph(isolate, graph, &common, nullptr, nullptr, &machine);
Node* control = nullptr;
Node* effect = nullptr;
- WasmWrapperGraphBuilder builder(&zone, &jsgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), &jsgraph, sig, nullptr,
StubCallMode::kCallCodeObject,
wasm::WasmFeaturesFromIsolate(isolate));
builder.set_control_ptr(&control);
@@ -6363,29 +6502,36 @@ MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig) {
builder.BuildCWasmEntry();
// Schedule and compile to machine code.
- CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
- &zone, false, CWasmEntryParameters::kNumParameters + 1,
- CallDescriptor::kNoFlags);
+ MachineType sig_types[] = {MachineType::Pointer(), // return
+ MachineType::Pointer(), // target
+ MachineType::AnyTagged(), // object_ref
+ MachineType::Pointer(), // argv
+ MachineType::Pointer()}; // c_entry_fp
+ MachineSignature incoming_sig(1, 4, sig_types);
+ // Traps need the root register, for TailCallRuntimeWithCEntry to call
+ // Runtime::kThrowWasmError.
+ bool initialize_root_flag = true;
+ CallDescriptor* incoming = Linkage::GetSimplifiedCDescriptor(
+ zone.get(), &incoming_sig, initialize_root_flag);
// Build a name in the form "c-wasm-entry:<params>:<returns>".
static constexpr size_t kMaxNameLen = 128;
- char debug_name[kMaxNameLen] = "c-wasm-entry:";
- AppendSignature(debug_name, kMaxNameLen, sig);
-
- MaybeHandle<Code> maybe_code = Pipeline::GenerateCodeForWasmHeapStub(
- isolate, incoming, &graph, Code::C_WASM_ENTRY, debug_name,
- AssemblerOptions::Default(isolate));
- Handle<Code> code;
- if (!maybe_code.ToHandle(&code)) {
- return maybe_code;
- }
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_opt_code) {
- CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
- OFStream os(tracing_scope.file());
- code->Disassemble(debug_name, os);
- }
-#endif
+ auto debug_name = std::unique_ptr<char[]>(new char[kMaxNameLen]);
+ memcpy(debug_name.get(), "c-wasm-entry:", 14);
+ AppendSignature(debug_name.get(), kMaxNameLen, sig);
+
+ // Run the compilation job synchronously.
+ std::unique_ptr<OptimizedCompilationJob> job(
+ Pipeline::NewWasmHeapStubCompilationJob(
+ isolate, incoming, std::move(zone), graph, Code::C_WASM_ENTRY,
+ std::move(debug_name), AssemblerOptions::Default(isolate)));
+
+ if (job->PrepareJob(isolate) == CompilationJob::FAILED ||
+ job->ExecuteJob() == CompilationJob::FAILED ||
+ job->FinalizeJob(isolate) == CompilationJob::FAILED) {
+ return {};
+ }
+ Handle<Code> code = job->compilation_info()->code();
return code;
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 460d0d2f1b..315733c396 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_WASM_COMPILER_H_
#include <memory>
+#include <utility>
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
@@ -20,6 +21,7 @@
namespace v8 {
namespace internal {
struct AssemblerOptions;
+class OptimizedCompilationJob;
namespace compiler {
// Forward declarations for some compiler data structures.
@@ -103,13 +105,23 @@ enum class WasmImportCallKind : uint8_t {
kUseCallBuiltin
};
-V8_EXPORT_PRIVATE WasmImportCallKind
-GetWasmImportCallKind(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
+// TODO(wasm): There should be only one import kind for sloppy and strict in
+// order to reduce wrapper cache misses. The mode can be checked at runtime
+// instead.
+constexpr WasmImportCallKind kDefaultImportCallKind =
+ WasmImportCallKind::kJSFunctionArityMatchSloppy;
+
+// Resolves which import call wrapper is required for the given JS callable.
+// Returns the kind of wrapper needed and the ultimate target callable. Note
+// that
+// some callables (e.g. a {WasmExportedFunction} or {WasmJSFunction}) just wrap
+// another target, which is why the ultimate target is returned as well.
+V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
+ResolveWasmImportCall(Handle<JSReceiver> callable, wasm::FunctionSig* sig,
bool has_bigint_feature);
// Compiles an import call wrapper, which allows WASM to call imports.
-V8_EXPORT_PRIVATE wasm::WasmCode* CompileWasmImportCallWrapper(
- wasm::WasmEngine*, wasm::NativeModule*, WasmImportCallKind,
+V8_EXPORT_PRIVATE wasm::WasmCompilationResult CompileWasmImportCallWrapper(
+ wasm::WasmEngine*, wasm::CompilationEnv* env, WasmImportCallKind,
wasm::FunctionSig*, bool source_positions);
// Compiles a host call wrapper, which allows WASM to call host functions.
@@ -117,11 +129,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
wasm::NativeModule*,
wasm::FunctionSig*, Address address);
-// Creates a code object calling a wasm function with the given signature,
-// callable from JS.
-V8_EXPORT_PRIVATE MaybeHandle<Code> CompileJSToWasmWrapper(Isolate*,
- wasm::FunctionSig*,
- bool is_import);
+// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
+std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
+ Isolate* isolate, wasm::FunctionSig* sig, bool is_import);
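
Since the JS-to-Wasm wrapper is now produced as a compilation job rather than finished code, a caller drives the job through its three phases, the same way CompileCWasmEntry does above. An illustrative synchronous use (error handling elided; only the job API already shown in this patch is assumed):

  std::unique_ptr<OptimizedCompilationJob> job =
      compiler::NewJSToWasmCompilationJob(isolate, sig, is_import);
  if (job->PrepareJob(isolate) != CompilationJob::FAILED &&
      job->ExecuteJob() != CompilationJob::FAILED &&
      job->FinalizeJob(isolate) != CompilationJob::FAILED) {
    Handle<Code> wrapper = job->compilation_info()->code();
  }
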
// Compiles a stub that redirects a call to a wasm function to the wasm
// interpreter. It's ABI compatible with the compiled wasm function.
@@ -133,13 +143,13 @@ enum CWasmEntryParameters {
kCodeEntry,
kObjectRef,
kArgumentsBuffer,
+ kCEntryFp,
// marker:
kNumParameters
};
-// Compiles a stub with JS linkage, taking parameters as described by
-// {CWasmEntryParameters}. It loads the wasm parameters from the argument
-// buffer and calls the wasm function given as first parameter.
+// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
+// which knows how to feed it its parameters.
MaybeHandle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig);
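
With the extra kCEntryFp parameter, the entry stub's interface matches the MachineSignature built in CompileCWasmEntry above: a pointer-sized return plus a pointer target, a tagged object_ref, a pointer argv and a pointer c_entry_fp. As an illustrative C-style prototype only (the name is hypothetical; real calls go through Execution::CallWasm):

  // return-slot pointer <- (wasm code entry, object ref, packed argv, c_entry_fp)
  Address c_wasm_entry_stub(Address code_entry, Object object_ref, Address argv,
                            Address c_entry_fp);
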
// Values from the instance object are cached between WASM-level function calls.
@@ -280,9 +290,9 @@ class WasmGraphBuilder {
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
- Node* GetTable(uint32_t table_index, Node* index,
+ Node* TableGet(uint32_t table_index, Node* index,
wasm::WasmCodePosition position);
- Node* SetTable(uint32_t table_index, Node* index, Node* val,
+ Node* TableSet(uint32_t table_index, Node* index, Node* val,
wasm::WasmCodePosition position);
//-----------------------------------------------------------------------
// Operations that concern the linear memory.
@@ -377,6 +387,7 @@ class WasmGraphBuilder {
Node* AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
uint32_t alignment, uint32_t offset,
wasm::WasmCodePosition position);
+ Node* AtomicFence();
// Returns a pointer to the dropped_data_segments array. Traps if the data
// segment is active or has been dropped.
@@ -395,7 +406,7 @@ class WasmGraphBuilder {
Node* TableInit(uint32_t table_index, uint32_t elem_segment_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* ElemDrop(uint32_t elem_segment_index, wasm::WasmCodePosition position);
- Node* TableCopy(uint32_t table_src_index, uint32_t table_dst_index, Node* dst,
+ Node* TableCopy(uint32_t table_dst_index, uint32_t table_src_index, Node* dst,
Node* src, Node* size, wasm::WasmCodePosition position);
Node* TableGrow(uint32_t table_index, Node* value, Node* delta);
Node* TableSize(uint32_t table_index);
@@ -485,10 +496,10 @@ class WasmGraphBuilder {
Node* BuildCallNode(wasm::FunctionSig* sig, Node** args,
wasm::WasmCodePosition position, Node* instance_node,
const Operator* op);
- // Special implementation for CallIndirect for table 0.
- Node* BuildIndirectCall(uint32_t sig_index, Node** args, Node*** rets,
- wasm::WasmCodePosition position,
- IsReturnCall continuation);
+ // Helper function for {BuildIndirectCall}.
+ void LoadIndirectFunctionTable(uint32_t table_index, Node** ift_size,
+ Node** ift_sig_ids, Node** ift_targets,
+ Node** ift_instances);
Node* BuildIndirectCall(uint32_t table_index, uint32_t sig_index, Node** args,
Node*** rets, wasm::WasmCodePosition position,
IsReturnCall continuation);
@@ -591,8 +602,6 @@ class WasmGraphBuilder {
return buf;
}
- Node* BuildLoadBuiltinFromInstance(int builtin_index);
-
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
// to get off the GC heap.