author     Michaël Zasso <targos@protonmail.com>  2020-11-13 12:51:53 +0100
committer  Michaël Zasso <targos@protonmail.com>  2020-11-15 16:46:54 +0100
commit     48db20f6f53060e38b2272566b014741eb4f519f (patch)
tree       e2f9b4c7f69d2e4597b73b4c3c09f4371d5cc963 /deps/v8/src/compiler
parent     79916428a48df937aa5b2b69c061d2d42181a76b (diff)
download   node-new-48db20f6f53060e38b2272566b014741eb4f519f.tar.gz
deps: update V8 to 8.7.220
PR-URL: https://github.com/nodejs/node/pull/35700
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Joyee Cheung <joyeec9h3@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 21
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 2
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 16
-rw-r--r--  deps/v8/src/compiler/allocation-builder-inl.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 13
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 8
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 59
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 20
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 16
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 60
-rw-r--r--  deps/v8/src/compiler/backend/code-generator-impl.h | 6
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.cc | 12
-rw-r--r--  deps/v8/src/compiler/backend/code-generator.h | 8
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 35
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 23
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 2
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 125
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.h | 18
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.cc | 199
-rw-r--r--  deps/v8/src/compiler/backend/live-range-separator.h | 61
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 29
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 21
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 90
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 58
-rw-r--r--  deps/v8/src/compiler/backend/ppc/OWNERS | 6
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 172
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 19
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 19
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 45
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.cc | 688
-rw-r--r--  deps/v8/src/compiler/backend/register-allocator.h | 69
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 361
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 12
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 326
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 4
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 47
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 148
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 7
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 131
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 7
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 73
-rw-r--r--  deps/v8/src/compiler/compilation-dependencies.cc | 8
-rw-r--r--  deps/v8/src/compiler/compilation-dependency.h | 4
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 283
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 9
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 76
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 50
-rw-r--r--  deps/v8/src/compiler/graph-reducer.cc | 15
-rw-r--r--  deps/v8/src/compiler/graph-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 3
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 128
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 81
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 5
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 101
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 1
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.cc | 1344
-rw-r--r--  deps/v8/src/compiler/js-heap-broker.h | 31
-rw-r--r--  deps/v8/src/compiler/js-heap-copy-reducer.cc | 7
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 11
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 14
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 17
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 34
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 11
-rw-r--r--  deps/v8/src/compiler/linkage.h | 44
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 8
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 6
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 55
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 137
-rw-r--r--  deps/v8/src/compiler/processed-feedback.h | 7
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 54
-rw-r--r--  deps/v8/src/compiler/scheduled-machine-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/serializer-for-background-compilation.cc | 8
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 569
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.h | 8
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 1
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 35
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 32
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 39
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 3
-rw-r--r--  deps/v8/src/compiler/typer.cc | 9
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 4
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 441
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 17
100 files changed, 3976 insertions, 2845 deletions
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 1b3b2752b6..f9d15264e6 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -17,7 +17,7 @@
#include "src/objects/objects-inl.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/source-text-module.h"
-#include "torque-generated/exported-class-definitions-tq.h"
+#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -871,6 +871,15 @@ FieldAccess AccessBuilder::ForFeedbackVectorSlot(int index) {
}
// static
+FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
+ int offset = WeakFixedArray::OffsetOfElementAt(index);
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+// static
FieldAccess AccessBuilder::ForCellValue() {
FieldAccess access = {kTaggedBase, Cell::kValueOffset,
Handle<Name>(), MaybeHandle<Map>(),
@@ -1239,6 +1248,16 @@ FieldAccess AccessBuilder::ForFeedbackVectorClosureFeedbackCellArray() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi() {
+ FieldAccess access = {
+ kTaggedBase, FeedbackVector::kOptimizedCodeWeakOrSmiOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 52c1261ff0..af5882988d 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -293,6 +293,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to WeakFixedArray elements.
static ElementAccess ForWeakFixedArrayElement();
+ static FieldAccess ForWeakFixedArraySlot(int index);
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
@@ -344,6 +345,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to a FeedbackVector fields.
static FieldAccess ForFeedbackVectorClosureFeedbackCellArray();
+ static FieldAccess ForFeedbackVectorOptimizedCodeWeakOrSmi();
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 4fb5ebd69c..046927e943 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -40,6 +40,19 @@ bool CanInlinePropertyAccess(Handle<Map> map) {
!map->is_access_check_needed();
}
+#ifdef DEBUG
+bool HasFieldRepresentationDependenciesOnMap(
+ ZoneVector<CompilationDependency const*>& dependencies,
+ Handle<Map> const& field_owner_map) {
+ for (auto dep : dependencies) {
+ if (dep->IsFieldRepresentationDependencyOnMap(field_owner_map)) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
} // namespace
@@ -84,6 +97,9 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, Representation field_representation,
Type field_type, Handle<Map> field_owner_map, MaybeHandle<Map> field_map,
MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map) {
+ DCHECK_IMPLIES(
+ field_representation.IsDouble(),
+ HasFieldRepresentationDependenciesOnMap(dependencies, field_owner_map));
return PropertyAccessInfo(kDataField, holder, transition_map, field_index,
field_representation, field_type, field_owner_map,
field_map, {{receiver_map}, zone},
diff --git a/deps/v8/src/compiler/allocation-builder-inl.h b/deps/v8/src/compiler/allocation-builder-inl.h
index 2b6109f49e..8a9d74e071 100644
--- a/deps/v8/src/compiler/allocation-builder-inl.h
+++ b/deps/v8/src/compiler/allocation-builder-inl.h
@@ -8,8 +8,8 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/objects/map-inl.h"
-#include "torque-generated/exported-class-definitions-tq-inl.h"
-#include "torque-generated/exported-class-definitions-tq.h"
+#include "torque-generated/exported-class-definitions-inl.h"
+#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index b01297e03a..2c7e856239 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -2026,7 +2026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register rhs = i.InputSimd128Register(1);
DCHECK_EQ(dst, lhs);
- // Move rhs only when rhs is strictly greater (mi).
+ // Move rhs only when rhs is strictly lesser (mi).
__ VFPCompareAndSetFlags(rhs.low(), lhs.low());
__ vmov(dst.low(), rhs.low(), mi);
__ VFPCompareAndSetFlags(rhs.high(), lhs.high());
@@ -2039,7 +2039,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register rhs = i.InputSimd128Register(1);
DCHECK_EQ(dst, lhs);
- // Move rhs only when rhs is strictly greater (mi).
+ // Move rhs only when rhs is strictly greater (gt).
__ VFPCompareAndSetFlags(rhs.low(), lhs.low());
__ vmov(dst.low(), rhs.low(), gt);
__ VFPCompareAndSetFlags(rhs.high(), lhs.high());
@@ -2150,7 +2150,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmI64x2Neg: {
Simd128Register dst = i.OutputSimd128Register();
__ vmov(dst, uint64_t{0});
- __ vqsub(NeonS64, dst, dst, i.InputSimd128Register(0));
+ __ vsub(Neon64, dst, dst, i.InputSimd128Register(0));
break;
}
case kArmI64x2Shl: {
@@ -3097,7 +3097,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1), i.InputInt4(2));
break;
}
- case kArmS8x16Swizzle: {
+ case kArmI8x16Swizzle: {
Simd128Register dst = i.OutputSimd128Register(),
tbl = i.InputSimd128Register(0),
src = i.InputSimd128Register(1);
@@ -3106,7 +3106,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vtbl(dst.high(), table, src.high());
break;
}
- case kArmS8x16Shuffle: {
+ case kArmI8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3648,9 +3648,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ Push(kJavaScriptCallArgCountRegister);
- }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index 64707cb612..b3ee561e27 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -307,8 +307,8 @@ namespace compiler {
V(ArmS8x16TransposeLeft) \
V(ArmS8x16TransposeRight) \
V(ArmS8x16Concat) \
- V(ArmS8x16Swizzle) \
- V(ArmS8x16Shuffle) \
+ V(ArmI8x16Swizzle) \
+ V(ArmI8x16Shuffle) \
V(ArmS32x2Reverse) \
V(ArmS16x4Reverse) \
V(ArmS16x2Reverse) \
diff --git a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
index aa9fa9e17b..6459d22a11 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-scheduler-arm.cc
@@ -287,8 +287,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS8x16TransposeLeft:
case kArmS8x16TransposeRight:
case kArmS8x16Concat:
- case kArmS8x16Swizzle:
- case kArmS8x16Shuffle:
+ case kArmI8x16Swizzle:
+ case kArmI8x16Shuffle:
case kArmS32x2Reverse:
case kArmS16x4Reverse:
case kArmS16x2Reverse:
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index c53c8f372e..e868a1a47a 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -2870,7 +2870,7 @@ void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -2923,18 +2923,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// Code generator uses vtbl, arrange sources to form a valid lookup table.
InstructionOperand src0, src1;
ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
- Emit(kArmS8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ Emit(kArmI8x16Shuffle, g.DefineAsRegister(node), src0, src1,
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
ArmOperandGenerator g(this);
// We don't want input 0 (the table) to be the same as output, since we will
// modify output twice (low and high), and need to keep the table the same.
- Emit(kArmS8x16Swizzle, g.DefineAsRegister(node),
+ Emit(kArmI8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index cee8651276..6524502408 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -2551,18 +2551,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).V16B(), i.InputInt4(2));
break;
}
- case kArm64S8x16Swizzle: {
+ case kArm64I8x16Swizzle: {
__ Tbl(i.OutputSimd128Register().V16B(), i.InputSimd128Register(0).V16B(),
i.InputSimd128Register(1).V16B());
break;
}
- case kArm64S8x16Shuffle: {
+ case kArm64I8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V16B(),
src0 = i.InputSimd128Register(0).V16B(),
src1 = i.InputSimd128Register(1).V16B();
// Unary shuffle table is in src0, binary shuffle table is in src0, src1,
// which must be consecutive.
- int64_t mask = 0;
+ uint32_t mask = 0;
if (src0 == src1) {
mask = 0x0F0F0F0F;
} else {
@@ -2601,20 +2601,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Add(i.OutputRegister32(), i.OutputRegister32(), 1);
break;
}
- case kArm64S8x16LoadSplat: {
- __ ld1r(i.OutputSimd128Register().V16B(), i.MemoryOperand(0));
- break;
- }
- case kArm64S16x8LoadSplat: {
- __ ld1r(i.OutputSimd128Register().V8H(), i.MemoryOperand(0));
- break;
- }
- case kArm64S32x4LoadSplat: {
- __ ld1r(i.OutputSimd128Register().V4S(), i.MemoryOperand(0));
- break;
- }
- case kArm64S64x2LoadSplat: {
- __ ld1r(i.OutputSimd128Register().V2D(), i.MemoryOperand(0));
+ case kArm64LoadSplat: {
+ VectorFormat f = VectorFormatFillQ(MiscField::decode(opcode));
+ __ ld1r(i.OutputSimd128Register().Format(f), i.MemoryOperand(0));
break;
}
case kArm64I16x8Load8x8S: {
@@ -2647,6 +2636,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Uxtl(i.OutputSimd128Register().V2D(), i.OutputSimd128Register().V2S());
break;
}
+ case kArm64S128LoadMem32Zero: {
+ __ Ldr(i.OutputSimd128Register().S(), i.MemoryOperand(0));
+ break;
+ }
+ case kArm64S128LoadMem64Zero: {
+ __ Ldr(i.OutputSimd128Register().D(), i.MemoryOperand(0));
+ break;
+ }
#define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \
case Op: { \
UseScratchRegisterScope scope(tasm()); \
@@ -2657,13 +2654,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cset(i.OutputRegister32(), ne); \
break; \
}
- // for AnyTrue, the format does not matter, umaxv does not support 2D
- SIMD_REDUCE_OP_CASE(kArm64V64x2AnyTrue, Umaxv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64V32x4AnyTrue, Umaxv, kFormatS, 4S);
+ // For AnyTrue, the format does not matter.
+ SIMD_REDUCE_OP_CASE(kArm64V128AnyTrue, Umaxv, kFormatS, 4S);
SIMD_REDUCE_OP_CASE(kArm64V32x4AllTrue, Uminv, kFormatS, 4S);
- SIMD_REDUCE_OP_CASE(kArm64V16x8AnyTrue, Umaxv, kFormatH, 8H);
SIMD_REDUCE_OP_CASE(kArm64V16x8AllTrue, Uminv, kFormatH, 8H);
- SIMD_REDUCE_OP_CASE(kArm64V8x16AnyTrue, Umaxv, kFormatB, 16B);
SIMD_REDUCE_OP_CASE(kArm64V8x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
@@ -2911,7 +2905,12 @@ void CodeGenerator::AssembleConstructFrame() {
if (frame_access_state()->has_frame()) {
// Link the frame
if (call_descriptor->IsJSFunctionCall()) {
+ STATIC_ASSERT(InterpreterFrameConstants::kFixedFrameSize % 16 == 8);
+ DCHECK_EQ(required_slots % 2, 1);
__ Prologue();
+ // Update required_slots count since we have just claimed one extra slot.
+ STATIC_ASSERT(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
+ required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
} else {
__ Push<TurboAssembler::kSignLR>(lr, fp);
__ Mov(fp, sp);
@@ -2929,7 +2928,13 @@ void CodeGenerator::AssembleConstructFrame() {
// to allocate the remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ size_t unoptimized_frame_slots = osr_helper()->UnoptimizedFrameSlots();
+ DCHECK(call_descriptor->IsJSFunctionCall());
+ DCHECK_EQ(unoptimized_frame_slots % 2, 1);
+ // One unoptimized frame slot has already been claimed when the actual
+ // arguments count was pushed.
+ required_slots -=
+ unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
ResetSpeculationPoison();
}
@@ -2984,13 +2989,7 @@ void CodeGenerator::AssembleConstructFrame() {
// recording their argument count.
switch (call_descriptor->kind()) {
case CallDescriptor::kCallJSFunction:
- if (call_descriptor->PushArgumentCount()) {
- __ Claim(required_slots + 1); // Claim extra slot for argc.
- __ Str(kJavaScriptCallArgCountRegister,
- MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
- } else {
- __ Claim(required_slots);
- }
+ __ Claim(required_slots);
break;
case CallDescriptor::kCallCodeObject: {
UseScratchRegisterScope temps(tasm());
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index e74819d9d6..7f84a3504b 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -367,26 +367,20 @@ namespace compiler {
V(Arm64S8x16TransposeLeft) \
V(Arm64S8x16TransposeRight) \
V(Arm64S8x16Concat) \
- V(Arm64S8x16Swizzle) \
- V(Arm64S8x16Shuffle) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
V(Arm64S32x2Reverse) \
V(Arm64S16x4Reverse) \
V(Arm64S16x2Reverse) \
V(Arm64S8x8Reverse) \
V(Arm64S8x4Reverse) \
V(Arm64S8x2Reverse) \
- V(Arm64V64x2AnyTrue) \
+ V(Arm64V128AnyTrue) \
V(Arm64V64x2AllTrue) \
- V(Arm64V32x4AnyTrue) \
V(Arm64V32x4AllTrue) \
- V(Arm64V16x8AnyTrue) \
V(Arm64V16x8AllTrue) \
- V(Arm64V8x16AnyTrue) \
V(Arm64V8x16AllTrue) \
- V(Arm64S8x16LoadSplat) \
- V(Arm64S16x8LoadSplat) \
- V(Arm64S32x4LoadSplat) \
- V(Arm64S64x2LoadSplat) \
+ V(Arm64LoadSplat) \
V(Arm64I16x8Load8x8S) \
V(Arm64I16x8Load8x8U) \
V(Arm64I32x4Load16x4S) \
@@ -428,7 +422,11 @@ namespace compiler {
V(Arm64Word64AtomicCompareExchangeUint8) \
V(Arm64Word64AtomicCompareExchangeUint16) \
V(Arm64Word64AtomicCompareExchangeUint32) \
- V(Arm64Word64AtomicCompareExchangeUint64)
+ V(Arm64Word64AtomicCompareExchangeUint64) \
+ V(Arm64S128LoadMem32Zero) \
+ V(Arm64S128LoadMem64Zero)
+// TODO(v8:10930) Adding new codes before these atomic instructions causes a
+// mksnapshot error.
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 5d75c5147e..6c572d2a1c 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -337,21 +337,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S8x16TransposeLeft:
case kArm64S8x16TransposeRight:
case kArm64S8x16Concat:
- case kArm64S8x16Swizzle:
- case kArm64S8x16Shuffle:
+ case kArm64I8x16Swizzle:
+ case kArm64I8x16Shuffle:
case kArm64S32x2Reverse:
case kArm64S16x4Reverse:
case kArm64S16x2Reverse:
case kArm64S8x8Reverse:
case kArm64S8x4Reverse:
case kArm64S8x2Reverse:
- case kArm64V64x2AnyTrue:
+ case kArm64V128AnyTrue:
case kArm64V64x2AllTrue:
- case kArm64V32x4AnyTrue:
case kArm64V32x4AllTrue:
- case kArm64V16x8AnyTrue:
case kArm64V16x8AllTrue:
- case kArm64V8x16AnyTrue:
case kArm64V8x16AllTrue:
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
@@ -373,16 +370,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
case kArm64Peek:
- case kArm64S8x16LoadSplat:
- case kArm64S16x8LoadSplat:
- case kArm64S32x4LoadSplat:
- case kArm64S64x2LoadSplat:
+ case kArm64LoadSplat:
case kArm64I16x8Load8x8S:
case kArm64I16x8Load8x8U:
case kArm64I32x4Load16x4S:
case kArm64I32x4Load16x4U:
case kArm64I64x2Load32x2S:
case kArm64I64x2Load32x2U:
+ case kArm64S128LoadMem32Zero:
+ case kArm64S128LoadMem64Zero:
return kIsLoadOperation;
case kArm64Claim:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 5f19a6bb7c..fac7f9c1d1 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -607,19 +607,23 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
bool require_add = false;
switch (params.transformation) {
case LoadTransformation::kS8x16LoadSplat:
- opcode = kArm64S8x16LoadSplat;
+ opcode = kArm64LoadSplat;
+ opcode |= MiscField::encode(8);
require_add = true;
break;
case LoadTransformation::kS16x8LoadSplat:
- opcode = kArm64S16x8LoadSplat;
+ opcode = kArm64LoadSplat;
+ opcode |= MiscField::encode(16);
require_add = true;
break;
case LoadTransformation::kS32x4LoadSplat:
- opcode = kArm64S32x4LoadSplat;
+ opcode = kArm64LoadSplat;
+ opcode |= MiscField::encode(32);
require_add = true;
break;
case LoadTransformation::kS64x2LoadSplat:
- opcode = kArm64S64x2LoadSplat;
+ opcode = kArm64LoadSplat;
+ opcode |= MiscField::encode(64);
require_add = true;
break;
case LoadTransformation::kI16x8Load8x8S:
@@ -640,6 +644,12 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
case LoadTransformation::kI64x2Load32x2U:
opcode = kArm64I64x2Load32x2U;
break;
+ case LoadTransformation::kS128LoadMem32Zero:
+ opcode = kArm64S128LoadMem32Zero;
+ break;
+ case LoadTransformation::kS128LoadMem64Zero:
+ opcode = kArm64S128LoadMem64Zero;
+ break;
default:
UNIMPLEMENTED();
}
@@ -1397,7 +1407,7 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
V(Float64Max, kArm64Float64Max) \
V(Float32Min, kArm64Float32Min) \
V(Float64Min, kArm64Float64Min) \
- V(S8x16Swizzle, kArm64S8x16Swizzle)
+ V(I8x16Swizzle, kArm64I8x16Swizzle)
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1775,10 +1785,9 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
VisitRR(this, kArm64Sxtw, node);
}
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- Arm64OperandGenerator g(this);
- Node* value = node->InputAt(0);
- switch (value->opcode()) {
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -1805,26 +1814,31 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// 32-bit operations will write their result in a W register (implicitly
// clearing the top 32-bit of the corresponding X register) so the
// zero-extension is a no-op.
- EmitIdentity(node);
- return;
+ return true;
}
case IrOpcode::kLoad: {
// As for the operations above, a 32-bit load will implicitly clear the
// top 32 bits of the destination register.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- EmitIdentity(node);
- return;
+ return true;
default:
- break;
+ return false;
}
- break;
}
default:
- break;
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (ZeroExtendsWord32ToWord64(value)) {
+ return EmitIdentity(node);
}
Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
}
@@ -3225,13 +3239,13 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16Neg, kArm64I8x16Neg) \
V(I8x16Abs, kArm64I8x16Abs) \
V(S128Not, kArm64S128Not) \
- V(V64x2AnyTrue, kArm64V64x2AnyTrue) \
+ V(V64x2AnyTrue, kArm64V128AnyTrue) \
V(V64x2AllTrue, kArm64V64x2AllTrue) \
- V(V32x4AnyTrue, kArm64V32x4AnyTrue) \
+ V(V32x4AnyTrue, kArm64V128AnyTrue) \
V(V32x4AllTrue, kArm64V32x4AllTrue) \
- V(V16x8AnyTrue, kArm64V16x8AnyTrue) \
+ V(V16x8AnyTrue, kArm64V128AnyTrue) \
V(V16x8AllTrue, kArm64V16x8AllTrue) \
- V(V8x16AnyTrue, kArm64V8x16AnyTrue) \
+ V(V8x16AnyTrue, kArm64V128AnyTrue) \
V(V8x16AllTrue, kArm64V8x16AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
@@ -3597,7 +3611,7 @@ void ArrangeShuffleTable(Arm64OperandGenerator* g, Node* input0, Node* input1,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -3647,7 +3661,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// Code generator uses vtbl, arrange sources to form a valid lookup table.
InstructionOperand src0, src1;
ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
- Emit(kArm64S8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ Emit(kArm64I8x16Shuffle, g.DefineAsRegister(node), src0, src1,
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
diff --git a/deps/v8/src/compiler/backend/code-generator-impl.h b/deps/v8/src/compiler/backend/code-generator-impl.h
index e335135240..88f82fe930 100644
--- a/deps/v8/src/compiler/backend/code-generator-impl.h
+++ b/deps/v8/src/compiler/backend/code-generator-impl.h
@@ -259,6 +259,12 @@ class OutOfLineCode : public ZoneObject {
inline bool HasCallDescriptorFlag(Instruction* instr,
CallDescriptor::Flag flag) {
+ STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
+#ifdef DEBUG
+ static constexpr int kInstructionCodeFlagsMask =
+ ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
+ DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
+#endif
return MiscField::decode(instr->opcode()) & flag;
}
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 6e740b18f8..33a80f52d0 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -613,8 +613,8 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
// then the full gap resolver must be used since optimization with
// pushes don't participate in the parallel move and might clobber
// values needed for the gap resolve.
- if (source.IsStackSlot() && LocationOperand::cast(source).index() >=
- first_push_compatible_index) {
+ if (source.IsAnyStackSlot() && LocationOperand::cast(source).index() >=
+ first_push_compatible_index) {
pushes->clear();
return;
}
@@ -973,15 +973,13 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
}
void CodeGenerator::RecordCallPosition(Instruction* instr) {
- CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
-
- bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
-
+ const bool needs_frame_state =
+ HasCallDescriptorFlag(instr, CallDescriptor::kNeedsFrameState);
RecordSafepoint(instr->reference_map(), needs_frame_state
? Safepoint::kLazyDeopt
: Safepoint::kNoLazyDeopt);
- if (flags & CallDescriptor::kHasExceptionHandler) {
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kHasExceptionHandler)) {
InstructionOperandConverter i(this, instr);
RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 74ec66f8d8..26d03f129a 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -310,11 +310,11 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
static bool IsValidPush(InstructionOperand source, PushTypeFlags push_type);
- // Generate a list moves from an instruction that are candidates to be turned
- // into push instructions on platforms that support them. In general, the list
- // of push candidates are moves to a set of contiguous destination
+ // Generate a list of moves from an instruction that are candidates to be
+ // turned into push instructions on platforms that support them. In general,
+ // the list of push candidates are moves to a set of contiguous destination
// InstructionOperand locations on the stack that don't clobber values that
- // are needed for resolve the gap or use values generated by the gap,
+ // are needed to resolve the gap or use values generated by the gap,
// i.e. moves that can be hoisted together before the actual gap and assembled
// together.
static void GetPushCompatibleMoves(Instruction* instr,
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 2878d6e56b..077324a31f 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -2214,9 +2214,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ psrld(kScratchDoubleReg, 1);
__ andps(dst, kScratchDoubleReg);
} else {
+ // TODO(zhin) Improve codegen for this case.
__ pcmpeqd(dst, dst);
+ __ movups(kScratchDoubleReg, src);
__ psrld(dst, 1);
- __ andps(dst, src);
+ __ andps(dst, kScratchDoubleReg);
}
break;
}
@@ -2236,9 +2238,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pslld(kScratchDoubleReg, 31);
__ xorps(dst, kScratchDoubleReg);
} else {
+ // TODO(zhin) Improve codegen for this case.
__ pcmpeqd(dst, dst);
+ __ movups(kScratchDoubleReg, src);
__ pslld(dst, 31);
- __ xorps(dst, src);
+ __ xorps(dst, kScratchDoubleReg);
}
break;
}
@@ -2251,7 +2255,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kSSEF32x4Sqrt: {
- __ sqrtps(i.OutputSimd128Register(), i.InputOperand(0));
+ // TODO(zhin) Improve codegen for this case.
+ __ movups(kScratchDoubleReg, i.InputOperand(0));
+ __ sqrtps(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXF32x4Sqrt: {
@@ -2348,7 +2354,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand src1 = i.InputOperand(1);
// See comment above for correction of minps.
__ movups(kScratchDoubleReg, src1);
- __ vminps(kScratchDoubleReg, kScratchDoubleReg, dst);
+ __ vminps(kScratchDoubleReg, kScratchDoubleReg, src0);
__ vminps(dst, src0, src1);
__ vorps(dst, dst, kScratchDoubleReg);
__ vcmpneqps(kScratchDoubleReg, dst, dst);
@@ -2381,11 +2387,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAVXF32x4Max: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
+ XMMRegister src0 = i.InputSimd128Register(0);
Operand src1 = i.InputOperand(1);
// See comment above for correction of maxps.
__ vmovups(kScratchDoubleReg, src1);
- __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vmaxps(dst, dst, src1);
+ __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, src0);
+ __ vmaxps(dst, src0, src1);
__ vxorps(dst, dst, kScratchDoubleReg);
__ vorps(kScratchDoubleReg, kScratchDoubleReg, dst);
__ vsubps(kScratchDoubleReg, kScratchDoubleReg, dst);
@@ -3643,8 +3650,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(dst, kScratchDoubleReg);
} else {
+ // TODO(zhin) Improve codegen for this case.
__ pcmpeqd(dst, dst);
- __ pxor(dst, src);
+ __ movups(kScratchDoubleReg, src);
+ __ pxor(dst, kScratchDoubleReg);
}
break;
}
@@ -3715,7 +3724,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Andnps(dst, src1);
break;
}
- case kIA32S8x16Swizzle: {
+ case kIA32I8x16Swizzle: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
XMMRegister mask = i.TempSimd128Register(0);
@@ -3728,7 +3737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufb(dst, mask);
break;
}
- case kIA32S8x16Shuffle: {
+ case kIA32I8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
Operand src0 = i.InputOperand(0);
Register tmp = i.TempRegister(0);
@@ -4690,9 +4699,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ push(kJavaScriptCallArgCountRegister);
- }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
@@ -4836,10 +4842,11 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
} else {
Register pop_reg = g.ToRegister(pop);
Register scratch_reg = pop_reg == ecx ? edx : ecx;
- __ pop(scratch_reg);
+ __ PopReturnAddressTo(scratch_reg);
__ lea(esp, Operand(esp, pop_reg, times_system_pointer_size,
static_cast<int>(pop_size)));
- __ jmp(scratch_reg);
+ __ PushReturnAddressFrom(scratch_reg);
+ __ Ret();
}
}
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 8f9f4fcf1c..eca9dc9227 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -355,8 +355,8 @@ namespace compiler {
V(SSES128Select) \
V(AVXS128Select) \
V(IA32S128AndNot) \
- V(IA32S8x16Swizzle) \
- V(IA32S8x16Shuffle) \
+ V(IA32I8x16Swizzle) \
+ V(IA32I8x16Shuffle) \
V(IA32S8x16LoadSplat) \
V(IA32S16x8LoadSplat) \
V(IA32S32x4LoadSplat) \
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 51a9a18e44..24abd58c7f 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -336,8 +336,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSES128Select:
case kAVXS128Select:
case kIA32S128AndNot:
- case kIA32S8x16Swizzle:
- case kIA32S8x16Shuffle:
+ case kIA32I8x16Swizzle:
+ case kIA32I8x16Shuffle:
case kIA32S32x4Swizzle:
case kIA32S32x4Shuffle:
case kIA32S16x8Blend:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index b0556fd4ef..fec4053871 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -2687,7 +2687,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -2704,9 +2704,12 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// AVX and swizzles don't generally need DefineSameAsFirst to avoid a move.
bool no_same_as_first = use_avx || is_swizzle;
// We generally need UseRegister for input0, Use for input1.
+ // TODO(v8:9198): We don't have 16-byte alignment for SIMD operands yet, but
+ // we retain this logic (continue setting these in the various shuffle match
+ // clauses), but ignore it when selecting registers or slots.
bool src0_needs_reg = true;
bool src1_needs_reg = false;
- ArchOpcode opcode = kIA32S8x16Shuffle; // general shuffle is the default
+ ArchOpcode opcode = kIA32I8x16Shuffle; // general shuffle is the default
uint8_t offset;
uint8_t shuffle32x4[4];
@@ -2794,7 +2797,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
src0_needs_reg = true;
imms[imm_count++] = index;
}
- if (opcode == kIA32S8x16Shuffle) {
+ if (opcode == kIA32I8x16Shuffle) {
// Use same-as-first for general swizzle, but not shuffle.
no_same_as_first = !is_swizzle;
src0_needs_reg = !no_same_as_first;
@@ -2810,16 +2813,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Node* input0 = node->InputAt(0);
InstructionOperand dst =
no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
- InstructionOperand src0 =
- src0_needs_reg ? g.UseRegister(input0) : g.Use(input0);
+ // TODO(v8:9198): Use src0_needs_reg when we have memory alignment for SIMD.
+ InstructionOperand src0 = g.UseRegister(input0);
+ USE(src0_needs_reg);
int input_count = 0;
InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
inputs[input_count++] = src0;
if (!is_swizzle) {
Node* input1 = node->InputAt(1);
- inputs[input_count++] =
- src1_needs_reg ? g.UseRegister(input1) : g.Use(input1);
+ // TODO(v8:9198): Use src1_needs_reg when we have memory alignment for SIMD.
+ inputs[input_count++] = g.UseRegister(input1);
+ USE(src1_needs_reg);
}
for (int i = 0; i < imm_count; ++i) {
inputs[input_count++] = g.UseImmediate(imms[i]);
@@ -2827,10 +2832,10 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kIA32S8x16Swizzle, g.DefineSameAsFirst(node),
+ Emit(kIA32I8x16Swizzle, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 84d5d249b8..8772a78df0 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -270,6 +270,8 @@ using InstructionCode = uint32_t;
// continuation into a single InstructionCode which is stored as part of
// the instruction.
using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
+static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
+ "All opcodes must fit in the 9-bit ArchOpcodeField.");
using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index d1594f9305..1c14832bbf 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -62,7 +62,12 @@ InstructionSelector::InstructionSelector(
trace_turbo_(trace_turbo),
tick_counter_(tick_counter),
max_unoptimized_frame_height_(max_unoptimized_frame_height),
- max_pushed_argument_count_(max_pushed_argument_count) {
+ max_pushed_argument_count_(max_pushed_argument_count)
+#if V8_TARGET_ARCH_64_BIT
+ ,
+ phi_states_(node_count, Upper32BitsState::kNotYetChecked, zone)
+#endif
+{
DCHECK_EQ(*max_unoptimized_frame_height, 0); // Caller-initialized.
instructions_.reserve(node_count);
@@ -2214,10 +2219,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Select(node);
case IrOpcode::kS128AndNot:
return MarkAsSimd128(node), VisitS128AndNot(node);
- case IrOpcode::kS8x16Swizzle:
- return MarkAsSimd128(node), VisitS8x16Swizzle(node);
- case IrOpcode::kS8x16Shuffle:
- return MarkAsSimd128(node), VisitS8x16Shuffle(node);
+ case IrOpcode::kI8x16Swizzle:
+ return MarkAsSimd128(node), VisitI8x16Swizzle(node);
+ case IrOpcode::kI8x16Shuffle:
+ return MarkAsSimd128(node), VisitI8x16Shuffle(node);
case IrOpcode::kV64x2AnyTrue:
return MarkAsWord32(node), VisitV64x2AnyTrue(node);
case IrOpcode::kV64x2AllTrue:
@@ -2681,39 +2686,6 @@ void InstructionSelector::VisitI64x2MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2MaxU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X
-#if !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
- !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10308) Bitmask operations are in prototype now, we can remove these
-// guards when they go into the proposal.
-void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
-// TODO(v8:10501) Prototyping pmin and pmax instructions.
-void InstructionSelector::VisitF32x4Pmin(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Pmax(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Pmin(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Pmax(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32
- // && !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X &&
- // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
- !V8_TARGET_ARCH_MIPS64
-// TODO(v8:10553) Prototyping floating point rounding instructions.
-void InstructionSelector::VisitF64x2Ceil(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Floor(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Trunc(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2NearestInt(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Ceil(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Floor(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4Trunc(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4NearestInt(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_S390X
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM &&
- // !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM64 && \
!V8_TARGET_ARCH_ARM
// TODO(v8:10583) Prototype i32x4.dot_i16x8_s
@@ -2737,10 +2709,20 @@ void InstructionSelector::VisitParameter(Node* node) {
}
namespace {
+
LinkageLocation ExceptionLocation() {
return LinkageLocation::ForRegister(kReturnRegister0.code(),
MachineType::IntPtr());
}
+
+constexpr InstructionCode EncodeCallDescriptorFlags(
+ InstructionCode opcode, CallDescriptor::Flags flags) {
+ // Note: Not all bits of `flags` are preserved.
+ STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
+ MiscField::kSize);
+ return opcode | MiscField::encode(flags & MiscField::kMax);
+}
+
} // namespace
void InstructionSelector::VisitIfException(Node* node) {
@@ -2863,6 +2845,7 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
#if ABI_USES_FUNCTION_DESCRIPTORS
// Highest misc_field bit is used on AIX to indicate if a CFunction call
// has function descriptor or not.
+ STATIC_ASSERT(MiscField::kSize == kHasFunctionDescriptorBitShift + 1);
if (!call_descriptor->NoFunctionDescriptor()) {
misc_field |= 1 << kHasFunctionDescriptorBitShift;
}
@@ -2871,18 +2854,18 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
break;
}
case CallDescriptor::kCallCodeObject:
- opcode = kArchCallCodeObject | MiscField::encode(flags);
+ opcode = EncodeCallDescriptorFlags(kArchCallCodeObject, flags);
break;
case CallDescriptor::kCallJSFunction:
- opcode = kArchCallJSFunction | MiscField::encode(flags);
+ opcode = EncodeCallDescriptorFlags(kArchCallJSFunction, flags);
break;
case CallDescriptor::kCallWasmCapiFunction:
case CallDescriptor::kCallWasmFunction:
case CallDescriptor::kCallWasmImportWrapper:
- opcode = kArchCallWasmFunction | MiscField::encode(flags);
+ opcode = EncodeCallDescriptorFlags(kArchCallWasmFunction, flags);
break;
case CallDescriptor::kCallBuiltinPointer:
- opcode = kArchCallBuiltinPointer | MiscField::encode(flags);
+ opcode = EncodeCallDescriptorFlags(kArchCallBuiltinPointer, flags);
break;
}
@@ -2912,9 +2895,9 @@ void InstructionSelector::VisitTailCall(Node* node) {
auto call_descriptor = CallDescriptorOf(node->op());
CallDescriptor* caller = linkage()->GetIncomingDescriptor();
- DCHECK(caller->CanTailCall(CallDescriptorOf(node->op())));
const CallDescriptor* callee = CallDescriptorOf(node->op());
- int stack_param_delta = callee->GetStackParameterDelta(caller);
+ DCHECK(caller->CanTailCall(callee));
+ const int stack_param_delta = callee->GetStackParameterDelta(caller);
CallBuffer buffer(zone(), call_descriptor, nullptr);
// Compute InstructionOperands for inputs and outputs.
@@ -2931,7 +2914,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
InstructionOperandVector temps(zone());
- if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+ if (caller->IsJSFunctionCall()) {
switch (call_descriptor->kind()) {
case CallDescriptor::kCallCodeObject:
opcode = kArchTailCallCodeObjectFromJSFunction;
@@ -2960,7 +2943,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
return;
}
}
- opcode |= MiscField::encode(call_descriptor->flags());
+ opcode = EncodeCallDescriptorFlags(opcode, call_descriptor->flags());
Emit(kArchPrepareTailCall, g.NoOutput());
@@ -2969,7 +2952,7 @@ void InstructionSelector::VisitTailCall(Node* node) {
// instruction. This is used by backends that need to pad arguments for stack
// alignment, in order to store an optional slot of padding above the
// arguments.
- int optional_padding_slot = callee->GetFirstUnusedStackSlot();
+ const int optional_padding_slot = callee->GetFirstUnusedStackSlot();
buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
const int first_unused_stack_slot =
@@ -3131,6 +3114,54 @@ bool InstructionSelector::CanProduceSignalingNaN(Node* node) {
return true;
}
+#if V8_TARGET_ARCH_64_BIT
+bool InstructionSelector::ZeroExtendsWord32ToWord64(Node* node,
+ int recursion_depth) {
+ // To compute whether a Node sets its upper 32 bits to zero, there are three
+ // cases.
+ // 1. Phi node, with a computed result already available in phi_states_:
+ // Read the value from phi_states_.
+ // 2. Phi node, with no result available in phi_states_ yet:
+ // Recursively check its inputs, and store the result in phi_states_.
+ // 3. Anything else:
+ // Call the architecture-specific ZeroExtendsWord32ToWord64NoPhis.
+
+ // Limit recursion depth to avoid the possibility of stack overflow on very
+ // large functions.
+ const int kMaxRecursionDepth = 100;
+
+ if (node->opcode() == IrOpcode::kPhi) {
+ Upper32BitsState current = phi_states_[node->id()];
+ if (current != Upper32BitsState::kNotYetChecked) {
+ return current == Upper32BitsState::kUpperBitsGuaranteedZero;
+ }
+
+ // If further recursion is prevented, we can't make any assumptions about
+ // the output of this phi node.
+ if (recursion_depth >= kMaxRecursionDepth) {
+ return false;
+ }
+
+ // Mark the current node so that we skip it if we recursively visit it
+ // again. Or, said differently, we compute a largest fixed-point so we can
+ // be optimistic when we hit cycles.
+ phi_states_[node->id()] = Upper32BitsState::kUpperBitsGuaranteedZero;
+
+ int input_count = node->op()->ValueInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (!ZeroExtendsWord32ToWord64(input, recursion_depth + 1)) {
+ phi_states_[node->id()] = Upper32BitsState::kNoGuarantee;
+ return false;
+ }
+ }
+
+ return true;
+ }
+ return ZeroExtendsWord32ToWord64NoPhis(node);
+}
+#endif // V8_TARGET_ARCH_64_BIT
+
namespace {
FrameStateDescriptor* GetFrameStateDescriptorInternal(Zone* zone, Node* state) {
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 0aa5dbbeaf..6452e3ec4c 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -667,6 +667,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWord64AtomicNarrowBinop(Node* node, ArchOpcode uint8_op,
ArchOpcode uint16_op, ArchOpcode uint32_op);
+#if V8_TARGET_ARCH_64_BIT
+ bool ZeroExtendsWord32ToWord64(Node* node, int recursion_depth = 0);
+ bool ZeroExtendsWord32ToWord64NoPhis(Node* node);
+
+ enum Upper32BitsState : uint8_t {
+ kNotYetChecked,
+ kUpperBitsGuaranteedZero,
+ kNoGuarantee,
+ };
+#endif // V8_TARGET_ARCH_64_BIT
+
// ===========================================================================
Zone* const zone_;
@@ -702,6 +713,13 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
// arguments (for calls). Later used to apply an offset to stack checks.
size_t* max_unoptimized_frame_height_;
size_t* max_pushed_argument_count_;
+
+#if V8_TARGET_ARCH_64_BIT
+ // Holds lazily-computed results for whether phi nodes guarantee their upper
+ // 32 bits to be zero. Indexed by node ID; nobody reads or writes the values
+ // for non-phi nodes.
+ ZoneVector<Upper32BitsState> phi_states_;
+#endif
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/backend/live-range-separator.cc b/deps/v8/src/compiler/backend/live-range-separator.cc
deleted file mode 100644
index acfe23dd06..0000000000
--- a/deps/v8/src/compiler/backend/live-range-separator.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/backend/live-range-separator.h"
-#include "src/compiler/backend/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define TRACE_COND(cond, ...) \
- do { \
- if (cond) PrintF(__VA_ARGS__); \
- } while (false)
-
-namespace {
-
-void CreateSplinter(TopLevelLiveRange* range,
- TopTierRegisterAllocationData* data,
- LifetimePosition first_cut, LifetimePosition last_cut,
- bool trace_alloc) {
- DCHECK(!range->IsSplinter());
- // We can ignore ranges that live solely in deferred blocks.
- // If a range ends right at the end of a deferred block, it is marked by
- // the range builder as ending at gap start of the next block - since the
- // end is a position where the variable isn't live. We need to take that
- // into consideration.
- LifetimePosition max_allowed_end = last_cut.NextFullStart();
-
- if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
- return;
- }
-
- LifetimePosition start = Max(first_cut, range->Start());
- LifetimePosition end = Min(last_cut, range->End());
-
- if (start < end) {
- // Ensure the original range has a spill range associated, before it gets
- // splintered. Splinters will point to it. This way, when attempting to
- // reuse spill slots of splinters, during allocation, we avoid clobbering
- // such slots.
- if (range->MayRequireSpillRange()) {
- data->CreateSpillRangeForLiveRange(range);
- }
- if (range->splinter() == nullptr) {
- TopLevelLiveRange* splinter =
- data->NextLiveRange(range->representation());
- DCHECK_NULL(data->live_ranges()[splinter->vreg()]);
- data->live_ranges()[splinter->vreg()] = splinter;
- range->SetSplinter(splinter);
- }
- Zone* zone = data->allocation_zone();
- TRACE_COND(trace_alloc,
- "creating splinter %d for range %d between %d and %d\n",
- range->splinter()->vreg(), range->vreg(),
- start.ToInstructionIndex(), end.ToInstructionIndex());
- range->Splinter(start, end, zone);
- }
-}
-
-void SetSlotUse(TopLevelLiveRange* range) {
- range->reset_slot_use();
- for (const UsePosition* pos = range->first_pos();
- !range->has_slot_use() && pos != nullptr; pos = pos->next()) {
- if (pos->type() == UsePositionType::kRequiresSlot) {
- range->register_slot_use(TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
- }
- }
-}
-
-void SplinterLiveRange(TopLevelLiveRange* range,
- TopTierRegisterAllocationData* data) {
- const InstructionSequence* code = data->code();
- UseInterval* interval = range->first_interval();
-
- LifetimePosition first_cut = LifetimePosition::Invalid();
- LifetimePosition last_cut = LifetimePosition::Invalid();
-
- while (interval != nullptr) {
- // We have to cache these here, as splintering might destroy the original
- // interval below.
- UseInterval* next_interval = interval->next();
- LifetimePosition interval_end = interval->end();
- const InstructionBlock* first_block =
- code->GetInstructionBlock(interval->FirstGapIndex());
- const InstructionBlock* last_block =
- code->GetInstructionBlock(interval->LastGapIndex());
- int first_block_nr = first_block->rpo_number().ToInt();
- int last_block_nr = last_block->rpo_number().ToInt();
- for (int block_id = first_block_nr; block_id <= last_block_nr; ++block_id) {
- const InstructionBlock* current_block =
- code->InstructionBlockAt(RpoNumber::FromInt(block_id));
- if (current_block->IsDeferred()) {
- if (!first_cut.IsValid()) {
- first_cut = LifetimePosition::GapFromInstructionIndex(
- current_block->first_instruction_index());
- }
- // We splinter until the last gap in the block. I assume this is done to
- // leave a little range to be allocated by normal register allocation
- // and then use that range to connect when splinters are merged back.
- // This might be done as control flow resolution does not insert moves
- // if two consecutive blocks in rpo order are also consecutive in
- // control flow.
- last_cut = LifetimePosition::GapFromInstructionIndex(
- current_block->last_instruction_index());
- } else {
- if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, last_cut,
- data->is_trace_alloc());
- first_cut = LifetimePosition::Invalid();
- last_cut = LifetimePosition::Invalid();
- }
- }
- }
- // If we reach the end of an interval with a first_cut and last_cut set, it
- // means that we can splinter to the end of the interval, as the value dies
- // in this control flow branch or is not live in the next block. In the
- // former case, we won't need to reload the value, so we can splinter to the
- // end of its lifetime. In the latter case, control flow resolution will
- // have to connect blocks anyway, so we can also splinter to the end of the
- // block, too.
- if (first_cut.IsValid()) {
- CreateSplinter(range, data, first_cut, interval_end,
- data->is_trace_alloc());
- first_cut = LifetimePosition::Invalid();
- last_cut = LifetimePosition::Invalid();
- }
- interval = next_interval;
- }
-
- // Redo has_slot_use
- if (range->has_slot_use() && range->splinter() != nullptr) {
- SetSlotUse(range);
- SetSlotUse(range->splinter());
- }
-}
-
-} // namespace
-
-void LiveRangeSeparator::Splinter() {
- size_t virt_reg_count = data()->live_ranges().size();
- for (size_t vreg = 0; vreg < virt_reg_count; ++vreg) {
- TopLevelLiveRange* range = data()->live_ranges()[vreg];
- if (range == nullptr || range->IsEmpty() || range->IsSplinter()) {
- continue;
- }
- int first_instr = range->first_interval()->FirstGapIndex();
- if (!data()->code()->GetInstructionBlock(first_instr)->IsDeferred()) {
- SplinterLiveRange(range, data());
- }
- }
-}
-
-void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
- const InstructionSequence* code = data()->code();
- for (TopLevelLiveRange* top : data()->live_ranges()) {
- if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
- top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
- continue;
- }
-
- LiveRange* child = top;
- for (; child != nullptr; child = child->next()) {
- if (child->spilled() ||
- child->NextSlotPosition(child->Start()) != nullptr) {
- break;
- }
- }
- if (child == nullptr) {
- DCHECK(!data()->is_turbo_control_flow_aware_allocation());
- top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
- code->InstructionBlockCount());
- }
- }
-}
-
-void LiveRangeMerger::Merge() {
- MarkRangesSpilledInDeferredBlocks();
-
- int live_range_count = static_cast<int>(data()->live_ranges().size());
- for (int i = 0; i < live_range_count; ++i) {
- TopLevelLiveRange* range = data()->live_ranges()[i];
- if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
- continue;
- }
- TopLevelLiveRange* splinter_parent = range->splintered_from();
-
- int to_remove = range->vreg();
- splinter_parent->Merge(range, data()->allocation_zone());
- data()->live_ranges()[to_remove] = nullptr;
- }
-}
-
-#undef TRACE_COND
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/backend/live-range-separator.h b/deps/v8/src/compiler/backend/live-range-separator.h
deleted file mode 100644
index f84b275e08..0000000000
--- a/deps/v8/src/compiler/backend/live-range-separator.h
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
-#define V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
-
-#include "src/zone/zone.h"
-namespace v8 {
-namespace internal {
-
-class Zone;
-
-namespace compiler {
-
-class TopTierRegisterAllocationData;
-
-// A register allocation pair of transformations: splinter and merge live ranges
-class LiveRangeSeparator final : public ZoneObject {
- public:
- LiveRangeSeparator(TopTierRegisterAllocationData* data, Zone* zone)
- : data_(data), zone_(zone) {}
-
- void Splinter();
-
- private:
- TopTierRegisterAllocationData* data() const { return data_; }
- Zone* zone() const { return zone_; }
-
- TopTierRegisterAllocationData* const data_;
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
-};
-
-class LiveRangeMerger final : public ZoneObject {
- public:
- LiveRangeMerger(TopTierRegisterAllocationData* data, Zone* zone)
- : data_(data), zone_(zone) {}
-
- void Merge();
-
- private:
- TopTierRegisterAllocationData* data() const { return data_; }
- Zone* zone() const { return zone_; }
-
- // Mark ranges spilled in deferred blocks, that also cover non-deferred code.
- // We do nothing special for ranges fully contained in deferred blocks,
- // because they would "spill in deferred blocks" anyway.
- void MarkRangesSpilledInDeferredBlocks();
-
- TopTierRegisterAllocationData* const data_;
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_COMPILER_BACKEND_LIVE_RANGE_SEPARATOR_H_
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 07416ab8ba..5457883fee 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -1239,7 +1239,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsAbsS:
- __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ } else {
+ __ mfc1(kScratchReg, i.InputSingleRegister(0));
+ __ Ins(kScratchReg, zero_reg, 31, 1);
+ __ mtc1(kScratchReg, i.OutputSingleRegister());
+ }
break;
case kMipsSqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
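
The non-r6 fallback added above computes float absolute value by moving the value to a GPR and clearing the sign bit with Ins (and, in the kMipsAbsD case below, by patching the high word via mfhc1/Ins/mthc1) instead of relying on abs_s/abs_d. A minimal standalone C++ sketch of the same sign-bit trick, for illustration only (plain standard C++, not V8 code):

  #include <cstdint>
  #include <cstring>

  // Clear bit 31 of a float / bit 63 of a double, which is what the
  // Ins/Dins sequences in the fallback paths do.
  float AbsViaSignBit(float x) {
    uint32_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0x7FFFFFFFu;            // zero the sign bit
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
  }

  double AbsViaSignBit(double x) {
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));
    bits &= 0x7FFFFFFFFFFFFFFFull;  // zero the sign bit
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
  }

This gives the IEEE 754-2008 non-arithmetic abs (sign-bit clear only) regardless of FPU mode; presumably that is why the integer-pipeline sequence is used on pre-r6 cores, where the behaviour of abs_s/abs_d on NaN operands depends on the FCSR configuration (this rationale is inferred, not stated in the diff).
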
@@ -1330,9 +1336,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMipsAbsD:
- __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kMipsAbsD: {
+ FPURegister src = i.InputDoubleRegister(0);
+ FPURegister dst = i.OutputDoubleRegister();
+ if (IsMipsArchVariant(kMips32r6)) {
+ __ abs_d(dst, src);
+ } else {
+ __ Move(dst, src);
+ __ mfhc1(kScratchReg, src);
+ __ Ins(kScratchReg, zero_reg, 31, 1);
+ __ mthc1(kScratchReg, dst);
+ }
break;
+ }
case kMipsNegS:
__ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
break;
@@ -3274,7 +3290,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
break;
}
- case kMipsS8x16Shuffle: {
+ case kMipsI8x16Shuffle: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
@@ -3299,7 +3315,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshf_b(dst, src1, src0);
break;
}
- case kMipsS8x16Swizzle: {
+ case kMipsI8x16Swizzle: {
Simd128Register dst = i.OutputSimd128Register(),
tbl = i.InputSimd128Register(0),
ctl = i.InputSimd128Register(1);
@@ -3905,9 +3921,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ Push(kJavaScriptCallArgCountRegister);
- }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index b95bd82d28..46ce3d359a 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -307,8 +307,8 @@ namespace compiler {
V(MipsS8x16PackOdd) \
V(MipsS8x16InterleaveEven) \
V(MipsS8x16InterleaveOdd) \
- V(MipsS8x16Shuffle) \
- V(MipsS8x16Swizzle) \
+ V(MipsI8x16Shuffle) \
+ V(MipsI8x16Swizzle) \
V(MipsS8x16Concat) \
V(MipsS8x8Reverse) \
V(MipsS8x4Reverse) \
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 507bb14664..64e78b8122 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -273,8 +273,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMipsS8x16InterleaveRight:
case kMipsS8x16PackEven:
case kMipsS8x16PackOdd:
- case kMipsS8x16Shuffle:
- case kMipsS8x16Swizzle:
+ case kMipsI8x16Shuffle:
+ case kMipsI8x16Swizzle:
case kMipsS8x2Reverse:
case kMipsS8x4Reverse:
case kMipsS8x8Reverse:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 6aabbf3761..b552b0dec1 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -1388,9 +1388,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- UNREACHABLE();
+ opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb;
+ break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh;
break;
@@ -1409,6 +1409,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kMipsMsaLd;
break;
+ case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
@@ -1446,9 +1447,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kMipsUsdc1;
break;
- case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- UNREACHABLE();
+ opcode = kMipsSb;
+ break;
case MachineRepresentation::kWord16:
opcode = kMipsUsh;
break;
@@ -1461,6 +1462,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kMipsMsaSt;
break;
+ case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
@@ -2378,7 +2380,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -2404,7 +2406,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
return;
}
- Emit(kMipsS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ Emit(kMipsI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
@@ -2412,15 +2414,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
MipsOperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
// We don't want input 0 or input 1 to be the same as output, since we will
   // modify output before doing the calculation.
- Emit(kMipsS8x16Swizzle, g.DefineAsRegister(node),
+ Emit(kMipsI8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 2fda592ae1..bb01eab924 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -1318,7 +1318,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64AbsS:
- __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ if (kArchVariant == kMips64r6) {
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ } else {
+ __ mfc1(kScratchReg, i.InputSingleRegister(0));
+ __ Dins(kScratchReg, zero_reg, 31, 1);
+ __ mtc1(kScratchReg, i.OutputSingleRegister());
+ }
break;
case kMips64NegS:
__ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
@@ -1378,7 +1384,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64AbsD:
- __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ if (kArchVariant == kMips64r6) {
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ dmfc1(kScratchReg, i.InputDoubleRegister(0));
+ __ Dins(kScratchReg, zero_reg, 63, 1);
+ __ dmtc1(kScratchReg, i.OutputDoubleRegister());
+ }
break;
case kMips64NegD:
__ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -1810,19 +1822,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMips64Peek: {
- // The incoming value is 0-based, but we need a 1-based value.
- int reverse_slot = i.InputInt32(0) + 1;
+ int reverse_slot = i.InputInt32(0);
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
- } else {
- DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
- __ lwc1(
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ __ Lwc1(
i.OutputSingleRegister(0),
MemOperand(fp, offset + kLessSignificantWordInDoublewordOffset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset));
}
} else {
__ Ld(i.OutputRegister(0), MemOperand(fp, offset));
@@ -2304,38 +2317,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F64x2Ceil: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToPlusInf);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToPlusInf);
break;
}
case kMips64F64x2Floor: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToMinusInf);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToMinusInf);
break;
}
case kMips64F64x2Trunc: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToZero);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToZero);
break;
}
case kMips64F64x2NearestInt: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- // kRoundToNearest == 0
- __ ctcmsa(MSACSR, zero_reg);
- __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToNearest);
break;
}
case kMips64I64x2ReplaceLane: {
@@ -2676,38 +2677,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64F32x4Ceil: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToPlusInf);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToPlusInf);
break;
}
case kMips64F32x4Floor: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToMinusInf);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToMinusInf);
break;
}
case kMips64F32x4Trunc: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- __ li(kScratchReg2, kRoundToZero);
- __ ctcmsa(MSACSR, kScratchReg2);
- __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToZero);
break;
}
case kMips64F32x4NearestInt: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
- __ cfcmsa(kScratchReg, MSACSR);
- // kRoundToNearest == 0
- __ ctcmsa(MSACSR, zero_reg);
- __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ ctcmsa(MSACSR, kScratchReg);
+ __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kRoundToNearest);
break;
}
case kMips64I32x4SConvertF32x4: {
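
The kMips64F64x2/F32x4 rounding cases above now delegate to MSARoundD/MSARoundW instead of open-coding the save-MSACSR, set-rounding-mode, frint, restore-MSACSR sequence in every case. A hedged sketch of that save/set/round/restore pattern in portable C++ (the actual helpers are TurboAssembler methods emitting the MSA instructions shown in the removed lines):

  #include <cfenv>
  #include <cmath>
  // Strictly, #pragma STDC FENV_ACCESS ON is required for conforming fenv use.

  // Same shape as the removed cfcmsa/ctcmsa/frint_d/ctcmsa sequence:
  // save the current rounding mode, switch to the requested one,
  // round to integral, then restore the saved mode.
  double RoundWithMode(double value, int mode /* e.g. FE_UPWARD */) {
    const int saved = std::fegetround();    // cfcmsa kScratchReg, MSACSR
    std::fesetround(mode);                  // ctcmsa MSACSR, <mode>
    double result = std::nearbyint(value);  // frint_d honours the current mode
    std::fesetround(saved);                 // ctcmsa MSACSR, kScratchReg
    return result;
  }

In this analogy kRoundToPlusInf/kRoundToMinusInf/kRoundToZero/kRoundToNearest map onto FE_UPWARD/FE_DOWNWARD/FE_TOWARDZERO/FE_TONEAREST.
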
@@ -3520,7 +3509,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2));
break;
}
- case kMips64S8x16Shuffle: {
+ case kMips64I8x16Shuffle: {
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD);
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
@@ -3545,7 +3534,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vshf_b(dst, src1, src0);
break;
}
- case kMips64S8x16Swizzle: {
+ case kMips64I8x16Swizzle: {
Simd128Register dst = i.OutputSimd128Register(),
tbl = i.InputSimd128Register(0),
ctl = i.InputSimd128Register(1);
@@ -4202,9 +4191,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ Push(kJavaScriptCallArgCountRegister);
- }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index fb60316517..577db6347c 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -342,8 +342,8 @@ namespace compiler {
V(Mips64S8x16PackOdd) \
V(Mips64S8x16InterleaveEven) \
V(Mips64S8x16InterleaveOdd) \
- V(Mips64S8x16Shuffle) \
- V(Mips64S8x16Swizzle) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
V(Mips64S8x16Concat) \
V(Mips64S8x8Reverse) \
V(Mips64S8x4Reverse) \
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 347cf577de..caf472bf30 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -305,8 +305,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S8x2Reverse:
case kMips64S8x4Reverse:
case kMips64S8x8Reverse:
- case kMips64S8x16Shuffle:
- case kMips64S8x16Swizzle:
+ case kMips64I8x16Shuffle:
+ case kMips64I8x16Swizzle:
case kMips64Sar:
case kMips64Seb:
case kMips64Seh:
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 16cc2bfa86..2c807b4183 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -1399,35 +1399,40 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
}
-void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
- Mips64OperandGenerator g(this);
- Node* value = node->InputAt(0);
- switch (value->opcode()) {
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
// 32-bit operations will write their result in a 64 bit register,
// clearing the top 32 bits of the destination register.
case IrOpcode::kUint32Div:
case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh: {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
- return;
- }
+ case IrOpcode::kUint32MulHigh:
+ return true;
case IrOpcode::kLoad: {
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
if (load_rep.IsUnsigned()) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
- return;
+ return true;
default:
- break;
+ return false;
}
}
- break;
+ return false;
}
default:
- break;
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (ZeroExtendsWord32ToWord64(value)) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ return;
}
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
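
The refactor above moves the opcode inspection out of VisitChangeUint32ToUint64 into the new ZeroExtendsWord32ToWord64NoPhis predicate, so the selector emits a plain kArchNop when the 32-bit producer already cleared the upper bits and only falls back to Dext(0, 32) otherwise. A simplified C++ sketch of that decision (the enum and helper below are illustrative stand-ins, not the real selector API):

  // Illustrative stand-in for the IrOpcode / LoadRepresentation checks.
  enum class Producer {
    kUint32Div, kUint32Mod, kUint32MulHigh,
    kUnsignedLoad8, kUnsignedLoad16, kUnsignedLoad32,
    kOther
  };

  bool ProducesZeroExtendedWord32(Producer p) {
    switch (p) {
      // These 32-bit operations write a 64-bit register and clear the
      // upper 32 bits, so no explicit zero-extension is required.
      case Producer::kUint32Div:
      case Producer::kUint32Mod:
      case Producer::kUint32MulHigh:
      case Producer::kUnsignedLoad8:
      case Producer::kUnsignedLoad16:
      case Producer::kUnsignedLoad32:
        return true;
      default:
        return false;  // emit an explicit Dext(0, 32) instead
    }
  }

Phis are excluded (hence the NoPhis suffix and the DCHECK); the generic ZeroExtendsWord32ToWord64 wrapper presumably handles them separately via the phi_states_ cache.
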
@@ -1711,7 +1716,7 @@ void InstructionSelector::EmitPrepareResults(
Node* node) {
Mips64OperandGenerator g(this);
- int reverse_slot = 0;
+ int reverse_slot = 1;
for (PushParameter output : *results) {
if (!output.location.IsCallerFrameSlot()) continue;
// Skip any alignment holes in nodes.
@@ -1721,6 +1726,8 @@ void InstructionSelector::EmitPrepareResults(
MarkAsFloat32(output.node);
} else if (output.location.GetType() == MachineType::Float64()) {
MarkAsFloat64(output.node);
+ } else if (output.location.GetType() == MachineType::Simd128()) {
+ MarkAsSimd128(output.node);
}
Emit(kMips64Peek, g.DefineAsRegister(output.node),
g.UseImmediate(reverse_slot));
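
Together with the kMips64Peek hunk earlier in this commit, reverse_slot is now 1-based at the point it is emitted, so the code generator no longer adds 1 before computing FrameSlotToFPOffset(total_slots - reverse_slot). A tiny worked example under assumed numbers (the 8-byte slot size and the omission of the fixed frame header are simplifications, not V8's exact formula):

  #include <cstdio>

  // Simplified: the real FrameSlotToFPOffset also accounts for the fixed
  // frame header; 8-byte slots are assumed here.
  int PeekOffsetBytes(int total_frame_slots, int reverse_slot) {
    return (total_frame_slots - reverse_slot) * 8;
  }

  int main() {
    // reverse_slot starts at 1 straight from EmitPrepareResults, so a
    // frame with 8 slots yields offsets 56, 48, ... for slots 1, 2, ...
    std::printf("%d %d\n", PeekOffsetBytes(8, 1), PeekOffsetBytes(8, 2));
  }
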
@@ -1747,9 +1754,9 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kMips64Uldc1;
break;
- case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- UNREACHABLE();
+ opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
+ break;
case MachineRepresentation::kWord16:
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
@@ -1765,6 +1772,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kMips64MsaLd;
break;
+ case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
@@ -1799,9 +1807,9 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kMips64Usdc1;
break;
- case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- UNREACHABLE();
+ opcode = kMips64Sb;
+ break;
case MachineRepresentation::kWord16:
opcode = kMips64Ush;
break;
@@ -1817,6 +1825,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kMips64MsaSt;
break;
+ case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kNone:
@@ -3073,7 +3082,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -3099,7 +3108,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
return;
}
- Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ Emit(kMips64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
@@ -3107,15 +3116,14 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
Mips64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
// We don't want input 0 or input 1 to be the same as output, since we will

   // modify output before doing the calculation.
- Emit(kMips64S8x16Swizzle, g.DefineAsRegister(node),
+ Emit(kMips64I8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)),
- arraysize(temps), temps);
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/ppc/OWNERS b/deps/v8/src/compiler/backend/ppc/OWNERS
index 6d1a8fc472..6edd45a6ef 100644
--- a/deps/v8/src/compiler/backend/ppc/OWNERS
+++ b/deps/v8/src/compiler/backend/ppc/OWNERS
@@ -1,4 +1,4 @@
-jyan@ca.ibm.com
+junyan@redhat.com
joransiu@ca.ibm.com
-michael_dawson@ca.ibm.com
-miladfar@ca.ibm.com
\ No newline at end of file
+midawson@redhat.com
+mfarazma@redhat.com
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 9d112495b3..767247b2fd 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -449,41 +449,42 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_FLOAT_MAX() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ fcmpu(left_reg, right_reg); \
- __ bunordered(&check_nan_left); \
- __ beq(&check_zero); \
- __ bge(&return_left); \
- __ b(&return_right); \
- \
- __ bind(&check_zero); \
- __ fcmpu(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left); \
- /* At this point, both left and right are either 0 or -0. */ \
- __ fadd(result_reg, left_reg, right_reg); \
- __ b(&done); \
- \
- __ bind(&check_nan_left); \
- __ fcmpu(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left); \
- __ bind(&return_right); \
- if (right_reg != result_reg) { \
- __ fmr(result_reg, right_reg); \
- } \
- __ b(&done); \
- \
- __ bind(&return_left); \
- if (left_reg != result_reg) { \
- __ fmr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_FLOAT_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_zero, return_left, return_right, return_nan, done; \
+ __ fcmpu(left_reg, right_reg); \
+ __ bunordered(&return_nan); \
+ __ beq(&check_zero); \
+ __ bge(&return_left); \
+ __ b(&return_right); \
+ \
+ __ bind(&check_zero); \
+ __ fcmpu(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ __ fadd(result_reg, left_reg, right_reg); \
+ __ b(&done); \
+ \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, fadd propagates the appropriate one.*/ \
+ __ fadd(result_reg, left_reg, right_reg); \
+ __ b(&done); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ fmr(result_reg, right_reg); \
+ } \
+ __ b(&done); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ fmr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
#define ASSEMBLE_FLOAT_MIN() \
@@ -491,9 +492,9 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
DoubleRegister left_reg = i.InputDoubleRegister(0); \
DoubleRegister right_reg = i.InputDoubleRegister(1); \
DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
+ Label check_zero, return_left, return_right, return_nan, done; \
__ fcmpu(left_reg, right_reg); \
- __ bunordered(&check_nan_left); \
+ __ bunordered(&return_nan); \
__ beq(&check_zero); \
__ ble(&return_left); \
__ b(&return_right); \
@@ -515,10 +516,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
__ fneg(result_reg, result_reg); \
__ b(&done); \
\
- __ bind(&check_nan_left); \
- __ fcmpu(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left); \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, fadd propagates the appropriate one.*/ \
+ __ fadd(result_reg, left_reg, right_reg); \
+ __ b(&done); \
\
__ bind(&return_right); \
if (right_reg != result_reg) { \
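
The rewritten ASSEMBLE_FLOAT_MAX and ASSEMBLE_FLOAT_MIN above replace the old left-operand-only NaN check with a return_nan path that executes fadd, which propagates a quiet NaN whenever either input is NaN. A scalar C++ sketch of the resulting max semantics (illustrative only, not the PPC macro):

  #include <cmath>

  // Mirrors the control flow of the rewritten macro: unordered compare ->
  // add (propagates a NaN from either side); equal and zero -> add
  // (resolves +0 vs -0); otherwise pick the larger operand.
  double FloatMaxLikeMacro(double left, double right) {
    if (std::isnan(left) || std::isnan(right)) return left + right;  // return_nan
    if (left == right) {
      if (left == 0.0) return left + right;  // +0 vs -0: addition yields +0
      return left;                           // left == right != 0
    }
    return left > right ? left : right;
  }

The min variant is symmetric, keeping its existing sign manipulation on the zero path; only the NaN handling changed.
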
@@ -3228,7 +3229,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_S8x16Shuffle: {
+ case kPPC_I8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -3287,7 +3288,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_S8x16Swizzle: {
+ case kPPC_I8x16Swizzle: {
// Reverse the input to match IBM lane numbering.
Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
__ addi(sp, sp, Operand(-16));
@@ -3355,6 +3356,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kPPC_S128AndNot: {
+ Simd128Register dst = i.OutputSimd128Register();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ vandc(dst, src, i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Div: {
+ __ xvdivdp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+#define F64X2_MIN_MAX_NAN(result) \
+ Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)); \
+ __ xvcmpeqdp(tempFPReg1, i.InputSimd128Register(0), \
+ i.InputSimd128Register(0)); \
+ __ vsel(result, i.InputSimd128Register(0), result, tempFPReg1); \
+ __ xvcmpeqdp(tempFPReg1, i.InputSimd128Register(1), \
+ i.InputSimd128Register(1)); \
+ __ vsel(i.OutputSimd128Register(), i.InputSimd128Register(1), result, \
+ tempFPReg1);
+ case kPPC_F64x2Min: {
+ __ xvmindp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+      // We need to check if an input is NaN and preserve it.
+ F64X2_MIN_MAX_NAN(kScratchDoubleReg)
+ break;
+ }
+ case kPPC_F64x2Max: {
+ __ xvmaxdp(kScratchDoubleReg, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+      // We need to check if an input is NaN and preserve it.
+ F64X2_MIN_MAX_NAN(kScratchDoubleReg)
+ break;
+ }
+#undef F64X2_MIN_MAX_NAN
+ case kPPC_F32x4Div: {
+ __ xvdivsp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Min: {
+ __ vminfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F32x4Max: {
+ __ vmaxfp(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kPPC_F64x2Ceil: {
+ __ xvrdpip(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2Floor: {
+ __ xvrdpim(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2Trunc: {
+ __ xvrdpiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F64x2NearestInt: {
+ __ xvrdpi(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Ceil: {
+ __ xvrspip(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Floor: {
+ __ xvrspim(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4Trunc: {
+ __ xvrspiz(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kPPC_F32x4NearestInt: {
+ __ xvrspi(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
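
The F64X2_MIN_MAX_NAN helper added above fixes up the xvmindp/xvmaxdp result lane by lane: xvcmpeqdp of an input with itself produces an all-ones mask except in NaN lanes, and vsel then re-injects the NaN input into exactly those lanes. A scalar C++ sketch of the per-lane rule (illustrative; the real code works on two 64-bit SIMD lanes, and src1's NaN wins when both lanes are NaN because its vsel runs last):

  #include <array>
  #include <cmath>

  // Per-lane semantics of the compare-and-select fixup: a NaN in either
  // input is preserved in the output, otherwise the min result stands.
  std::array<double, 2> F64x2MinLikeAbove(const std::array<double, 2>& a,
                                          const std::array<double, 2>& b) {
    std::array<double, 2> out{};
    for (int lane = 0; lane < 2; ++lane) {
      double m = std::fmin(a[lane], b[lane]);  // xvmindp
      if (std::isnan(a[lane])) m = a[lane];    // vsel with (a == a) mask
      if (std::isnan(b[lane])) m = b[lane];    // vsel with (b == b) mask
      out[lane] = m;
    }
    return out;
  }

kPPC_F64x2Max is identical except that xvmaxdp produces the initial result.
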
@@ -3631,9 +3714,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ Push(kJavaScriptCallArgCountRegister);
- }
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 9dc7bf49d0..fb5151ebd4 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -207,6 +207,13 @@ namespace compiler {
V(PPC_F64x2Sqrt) \
V(PPC_F64x2Qfma) \
V(PPC_F64x2Qfms) \
+ V(PPC_F64x2Div) \
+ V(PPC_F64x2Min) \
+ V(PPC_F64x2Max) \
+ V(PPC_F64x2Ceil) \
+ V(PPC_F64x2Floor) \
+ V(PPC_F64x2Trunc) \
+ V(PPC_F64x2NearestInt) \
V(PPC_F32x4Splat) \
V(PPC_F32x4ExtractLane) \
V(PPC_F32x4ReplaceLane) \
@@ -225,6 +232,13 @@ namespace compiler {
V(PPC_F32x4Sqrt) \
V(PPC_F32x4SConvertI32x4) \
V(PPC_F32x4UConvertI32x4) \
+ V(PPC_F32x4Div) \
+ V(PPC_F32x4Min) \
+ V(PPC_F32x4Max) \
+ V(PPC_F32x4Ceil) \
+ V(PPC_F32x4Floor) \
+ V(PPC_F32x4Trunc) \
+ V(PPC_F32x4NearestInt) \
V(PPC_I64x2Splat) \
V(PPC_I64x2ExtractLane) \
V(PPC_I64x2ReplaceLane) \
@@ -338,8 +352,8 @@ namespace compiler {
V(PPC_I8x16AddSaturateU) \
V(PPC_I8x16SubSaturateU) \
V(PPC_I8x16RoundingAverageU) \
- V(PPC_S8x16Shuffle) \
- V(PPC_S8x16Swizzle) \
+ V(PPC_I8x16Shuffle) \
+ V(PPC_I8x16Swizzle) \
V(PPC_V64x2AnyTrue) \
V(PPC_V32x4AnyTrue) \
V(PPC_V16x8AnyTrue) \
@@ -354,6 +368,7 @@ namespace compiler {
V(PPC_S128Zero) \
V(PPC_S128Not) \
V(PPC_S128Select) \
+ V(PPC_S128AndNot) \
V(PPC_StoreCompressTagged) \
V(PPC_LoadDecompressTaggedSigned) \
V(PPC_LoadDecompressTaggedPointer) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 0493d81dd7..8beaa8539c 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -130,6 +130,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_F64x2Sqrt:
case kPPC_F64x2Qfma:
case kPPC_F64x2Qfms:
+ case kPPC_F64x2Div:
+ case kPPC_F64x2Min:
+ case kPPC_F64x2Max:
+ case kPPC_F64x2Ceil:
+ case kPPC_F64x2Floor:
+ case kPPC_F64x2Trunc:
+ case kPPC_F64x2NearestInt:
case kPPC_F32x4Splat:
case kPPC_F32x4ExtractLane:
case kPPC_F32x4ReplaceLane:
@@ -150,6 +157,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_F32x4UConvertI32x4:
case kPPC_F32x4Qfma:
case kPPC_F32x4Qfms:
+ case kPPC_F32x4Div:
+ case kPPC_F32x4Min:
+ case kPPC_F32x4Max:
+ case kPPC_F32x4Ceil:
+ case kPPC_F32x4Floor:
+ case kPPC_F32x4Trunc:
+ case kPPC_F32x4NearestInt:
case kPPC_I64x2Splat:
case kPPC_I64x2ExtractLane:
case kPPC_I64x2ReplaceLane:
@@ -261,8 +275,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I8x16AddSaturateU:
case kPPC_I8x16SubSaturateU:
case kPPC_I8x16RoundingAverageU:
- case kPPC_S8x16Shuffle:
- case kPPC_S8x16Swizzle:
+ case kPPC_I8x16Shuffle:
+ case kPPC_I8x16Swizzle:
case kPPC_V64x2AnyTrue:
case kPPC_V32x4AnyTrue:
case kPPC_V16x8AnyTrue:
@@ -277,6 +291,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Zero:
case kPPC_S128Not:
case kPPC_S128Select:
+ case kPPC_S128AndNot:
return kNoOpcodeFlags;
case kPPC_LoadWordS8:
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 507542e28c..0c61821cf5 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -1200,6 +1200,10 @@ void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
VisitRR(this, kPPC_ExtendSignWord32, node);
}
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
VisitRR(this, kPPC_Uint32ToUint64, node);
@@ -2156,6 +2160,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Ne) \
V(F64x2Le) \
V(F64x2Lt) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max) \
V(F32x4Add) \
V(F32x4AddHoriz) \
V(F32x4Sub) \
@@ -2164,6 +2171,9 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(F32x4Div) \
+ V(F32x4Min) \
+ V(F32x4Max) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Mul) \
@@ -2222,15 +2232,20 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16AddSaturateU) \
V(I8x16SubSaturateU) \
V(I8x16RoundingAverageU) \
+ V(I8x16Swizzle) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
- V(S8x16Swizzle)
+ V(S128AndNot)
#define SIMD_UNOP_LIST(V) \
V(F64x2Abs) \
V(F64x2Neg) \
V(F64x2Sqrt) \
+ V(F64x2Ceil) \
+ V(F64x2Floor) \
+ V(F64x2Trunc) \
+ V(F64x2NearestInt) \
V(F32x4Abs) \
V(F32x4Neg) \
V(F32x4RecipApprox) \
@@ -2238,6 +2253,10 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4UConvertI32x4) \
+ V(F32x4Ceil) \
+ V(F32x4Floor) \
+ V(F32x4Trunc) \
+ V(F32x4NearestInt) \
V(I64x2Neg) \
V(I32x4Neg) \
V(I32x4Abs) \
@@ -2361,7 +2380,7 @@ SIMD_BOOL_LIST(SIMD_VISIT_BOOL)
#undef SIMD_BOOL_LIST
#undef SIMD_TYPES
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -2378,7 +2397,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
? max_index - current_index
: total_lane_count - current_index + max_index);
}
- Emit(kPPC_S8x16Shuffle, g.DefineAsRegister(node), g.UseUniqueRegister(input0),
+ Emit(kPPC_I8x16Shuffle, g.DefineAsRegister(node), g.UseUniqueRegister(input0),
g.UseUniqueRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_remapped + 4)),
@@ -2400,7 +2419,11 @@ void InstructionSelector::VisitS128Select(Node* node) {
void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitS128AndNot(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16BitMask(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8BitMask(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4BitMask(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::EmitPrepareResults(
ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
@@ -2427,19 +2450,15 @@ void InstructionSelector::EmitPrepareResults(
}
}
-void InstructionSelector::VisitF32x4Div(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Div(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Pmin(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Min(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF32x4Pmax(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF64x2Max(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Pmin(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitLoadTransform(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitF64x2Pmax(Node* node) { UNIMPLEMENTED(); }
// static
MachineOperatorBuilder::Flags
diff --git a/deps/v8/src/compiler/backend/register-allocator.cc b/deps/v8/src/compiler/backend/register-allocator.cc
index b3b40281b9..30724647c6 100644
--- a/deps/v8/src/compiler/backend/register-allocator.cc
+++ b/deps/v8/src/compiler/backend/register-allocator.cc
@@ -300,7 +300,6 @@ LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
next_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
- splitting_pointer_(nullptr),
current_hint_position_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
@@ -597,10 +596,7 @@ UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
// Find the last use position before the split and the first use
// position after it.
- UsePosition* use_after =
- splitting_pointer_ == nullptr || splitting_pointer_->pos() > position
- ? first_pos()
- : splitting_pointer_;
+ UsePosition* use_after = first_pos();
UsePosition* use_before = nullptr;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
@@ -819,24 +815,16 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
: LiveRange(0, rep, this),
vreg_(vreg),
last_child_id_(0),
- splintered_from_(nullptr),
spill_operand_(nullptr),
spill_move_insertion_locations_(nullptr),
spilled_in_deferred_blocks_(false),
has_preassigned_slot_(false),
spill_start_index_(kMaxInt),
last_pos_(nullptr),
- last_child_covers_(this),
- splinter_(nullptr) {
+ last_child_covers_(this) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
-#if DEBUG
-int TopLevelLiveRange::debug_virt_reg() const {
- return IsSplinter() ? splintered_from()->vreg() : vreg();
-}
-#endif
-
void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
@@ -925,157 +913,6 @@ AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
-void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
- Zone* zone) {
- DCHECK(start != Start() || end != End());
- DCHECK(start < end);
-
- TopLevelLiveRange splinter_temp(-1, representation());
- UsePosition* last_in_splinter = nullptr;
- // Live ranges defined in deferred blocks stay in deferred blocks, so we
- // don't need to splinter them. That means that start should always be
- // after the beginning of the range.
- DCHECK(start > Start());
-
- if (end >= End()) {
- DCHECK(start > Start());
- DetachAt(start, &splinter_temp, zone, ConnectHints);
- next_ = nullptr;
- } else {
- DCHECK(start < End() && Start() < end);
-
- const int kInvalidId = std::numeric_limits<int>::max();
-
- UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints);
-
- LiveRange end_part(kInvalidId, this->representation(), nullptr);
- // The last chunk exits the deferred region, and we don't want to connect
- // hints here, because the non-deferred region shouldn't be affected
- // by allocation decisions on the deferred path.
- last_in_splinter =
- splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints);
-
- next_ = end_part.next_;
- last_interval_->set_next(end_part.first_interval_);
- // The next splinter will happen either at or after the current interval.
- // We can optimize DetachAt by setting current_interval_ accordingly,
- // which will then be picked up by FirstSearchIntervalForPosition.
- current_interval_ = last_interval_;
- last_interval_ = end_part.last_interval_;
-
- if (first_pos_ == nullptr) {
- first_pos_ = end_part.first_pos_;
- } else {
- splitting_pointer_ = last;
- if (last != nullptr) last->set_next(end_part.first_pos_);
- }
- }
-
- if (splinter()->IsEmpty()) {
- splinter()->first_interval_ = splinter_temp.first_interval_;
- splinter()->last_interval_ = splinter_temp.last_interval_;
- } else {
- splinter()->last_interval_->set_next(splinter_temp.first_interval_);
- splinter()->last_interval_ = splinter_temp.last_interval_;
- }
- if (splinter()->first_pos() == nullptr) {
- splinter()->first_pos_ = splinter_temp.first_pos_;
- } else {
- splinter()->last_pos_->set_next(splinter_temp.first_pos_);
- }
- if (last_in_splinter != nullptr) {
- splinter()->last_pos_ = last_in_splinter;
- } else {
- if (splinter()->first_pos() != nullptr &&
- splinter()->last_pos_ == nullptr) {
- splinter()->last_pos_ = splinter()->first_pos();
- for (UsePosition* pos = splinter()->first_pos(); pos != nullptr;
- pos = pos->next()) {
- splinter()->last_pos_ = pos;
- }
- }
- }
-#if DEBUG
- Verify();
- splinter()->Verify();
-#endif
-}
-
-void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
- splintered_from_ = splinter_parent;
- if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
- SetSpillRange(splinter_parent->spill_range_);
- }
-}
-
-void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
- DCHECK(merged->TopLevel() == this);
-
- if (HasNoSpillType() && merged->HasSpillRange()) {
- set_spill_type(merged->spill_type());
- DCHECK_LT(0, GetSpillRange()->live_ranges().size());
- merged->spill_range_ = nullptr;
- merged->bits_ =
- SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
- }
-}
-
-void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
- DCHECK(Start() < other->Start());
- DCHECK(other->splintered_from() == this);
-
- LiveRange* first = this;
- LiveRange* second = other;
- DCHECK(first->Start() < second->Start());
- while (first != nullptr && second != nullptr) {
- DCHECK(first != second);
- // Make sure the ranges are in order each time we iterate.
- if (second->Start() < first->Start()) {
- LiveRange* tmp = second;
- second = first;
- first = tmp;
- continue;
- }
-
- if (first->End() <= second->Start()) {
- if (first->next() == nullptr ||
- first->next()->Start() > second->Start()) {
- // First is in order before second.
- LiveRange* temp = first->next();
- first->next_ = second;
- first = temp;
- } else {
- // First is in order before its successor (or second), so advance first.
- first = first->next();
- }
- continue;
- }
-
- DCHECK(first->Start() < second->Start());
- // If first and second intersect, split first.
- if (first->Start() < second->End() && second->Start() < first->End()) {
- LiveRange* temp = first->SplitAt(second->Start(), zone);
- CHECK(temp != first);
- temp->set_spilled(first->spilled());
- if (!temp->spilled())
- temp->set_assigned_register(first->assigned_register());
-
- first->next_ = second;
- first = temp;
- continue;
- }
- DCHECK(first->End() <= second->Start());
- }
-
- TopLevel()->UpdateParentForAllChildren(TopLevel());
- TopLevel()->UpdateSpillRangePostMerge(other);
- TopLevel()->register_slot_use(other->slot_use_kind());
-
-#if DEBUG
- Verify();
-#endif
-}
-
void TopLevelLiveRange::VerifyChildrenInOrder() const {
LifetimePosition last_end = End();
for (const LiveRange* child = this->next(); child != nullptr;
@@ -1271,8 +1108,7 @@ void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
void LinearScanAllocator::PrintRangeRow(std::ostream& os,
const TopLevelLiveRange* toplevel) {
int position = 0;
- os << std::setw(3) << toplevel->vreg()
- << (toplevel->IsSplinter() ? "s:" : ": ");
+ os << std::setw(3) << toplevel->vreg() << ": ";
const char* kind_string;
switch (toplevel->spill_type()) {
@@ -1340,10 +1176,9 @@ SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
byte_width_(ByteWidthForStackSlot(parent->representation())) {
- // Spill ranges are created for top level, non-splintered ranges. This is so
- // that, when merging decisions are made, we consider the full extent of the
- // virtual register, and avoid clobbering it.
- DCHECK(!parent->IsSplinter());
+  // Spill ranges are created for top-level ranges. This is so that, when
+ // decisions are made, we consider the full extent of the virtual register,
+ // and avoid clobbering it.
UseInterval* result = nullptr;
UseInterval* node = nullptr;
// Copy the intervals for all ranges.
@@ -1646,32 +1481,16 @@ SpillRange* TopTierRegisterAllocationData::AssignSpillRangeToLiveRange(
SpillRange* spill_range = range->GetAllocatedSpillRange();
if (spill_range == nullptr) {
- DCHECK(!range->IsSplinter());
spill_range = allocation_zone()->New<SpillRange>(range, allocation_zone());
}
if (spill_mode == SpillMode::kSpillDeferred &&
(range->spill_type() != SpillType::kSpillRange)) {
- DCHECK(is_turbo_control_flow_aware_allocation());
range->set_spill_type(SpillType::kDeferredSpillRange);
} else {
range->set_spill_type(SpillType::kSpillRange);
}
- int spill_range_index =
- range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
-
- spill_ranges()[spill_range_index] = spill_range;
-
- return spill_range;
-}
-
-SpillRange* TopTierRegisterAllocationData::CreateSpillRangeForLiveRange(
- TopLevelLiveRange* range) {
- DCHECK(is_turbo_preprocess_ranges());
- DCHECK(!range->HasSpillOperand());
- DCHECK(!range->IsSplinter());
- SpillRange* spill_range =
- allocation_zone()->New<SpillRange>(range, allocation_zone());
+ spill_ranges()[range->vreg()] = spill_range;
return spill_range;
}
@@ -2337,15 +2156,10 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int vreg = unalloc->virtual_register();
live->Add(vreg);
if (unalloc->HasSlotPolicy()) {
- if (data()->is_turbo_control_flow_aware_allocation()) {
- data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
- block->IsDeferred()
- ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
- : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
- } else {
- data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
- TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
- }
+ data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
+ block->IsDeferred()
+ ? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
+ : TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
Use(block_start_position, use_pos, input, spill_mode);
@@ -2917,12 +2731,7 @@ void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
next_pos = next_pos.NextStart();
}
- // With splinters, we can be more strict and skip over positions
- // not strictly needing registers.
- UsePosition* pos =
- range->IsSplinter()
- ? range->NextRegisterPosition(next_pos)
- : range->NextUsePositionRegisterIsBeneficial(next_pos);
+ UsePosition* pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
// If the range already has a spill operand and it doesn't need a
// register immediately, split it and spill the first part of the range.
if (pos == nullptr) {
@@ -3025,66 +2834,42 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
if (loop_header == nullptr) return pos;
- if (data()->is_turbo_control_flow_aware_allocation()) {
- while (loop_header != nullptr) {
- // We are going to spill live range inside the loop.
- // If possible try to move spilling position backwards to loop header.
- // This will reduce number of memory moves on the back edge.
- LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
- loop_header->first_instruction_index());
- // Stop if we moved to a loop header before the value is defined or
- // at the define position that is not beneficial to spill.
- if (range->TopLevel()->Start() > loop_start ||
- (range->TopLevel()->Start() == loop_start &&
- range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
- return pos;
-
- LiveRange* live_at_header = range->TopLevel()->GetChildCovers(loop_start);
-
- if (live_at_header != nullptr && !live_at_header->spilled()) {
- for (LiveRange* check_use = live_at_header;
- check_use != nullptr && check_use->Start() < pos;
- check_use = check_use->next()) {
- // If we find a use for which spilling is detrimental, don't spill
- // at the loop header
- UsePosition* next_use =
- check_use->NextUsePositionSpillDetrimental(loop_start);
- // UsePosition at the end of a UseInterval may
- // have the same value as the start of next range.
- if (next_use != nullptr && next_use->pos() <= pos) {
- return pos;
- }
+ while (loop_header != nullptr) {
+ // We are going to spill live range inside the loop.
+ // If possible try to move spilling position backwards to loop header.
+ // This will reduce number of memory moves on the back edge.
+ LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
+ loop_header->first_instruction_index());
+ // Stop if we moved to a loop header before the value is defined or
+ // at the define position that is not beneficial to spill.
+ if (range->TopLevel()->Start() > loop_start ||
+ (range->TopLevel()->Start() == loop_start &&
+ range->TopLevel()->SpillAtLoopHeaderNotBeneficial()))
+ return pos;
+
+ LiveRange* live_at_header = range->TopLevel()->GetChildCovers(loop_start);
+
+ if (live_at_header != nullptr && !live_at_header->spilled()) {
+ for (LiveRange* check_use = live_at_header;
+ check_use != nullptr && check_use->Start() < pos;
+ check_use = check_use->next()) {
+ // If we find a use for which spilling is detrimental, don't spill
+ // at the loop header
+ UsePosition* next_use =
+ check_use->NextUsePositionSpillDetrimental(loop_start);
+ // UsePosition at the end of a UseInterval may
+ // have the same value as the start of next range.
+ if (next_use != nullptr && next_use->pos() <= pos) {
+ return pos;
}
- // No register beneficial use inside the loop before the pos.
- *begin_spill_out = live_at_header;
- pos = loop_start;
}
-
- // Try hoisting out to an outer loop.
- loop_header = GetContainingLoop(code(), loop_header);
+ // No register beneficial use inside the loop before the pos.
+ *begin_spill_out = live_at_header;
+ pos = loop_start;
}
- } else {
- const UsePosition* prev_use =
- range->PreviousUsePositionRegisterIsBeneficial(pos);
-
- while (loop_header != nullptr) {
- // We are going to spill live range inside the loop.
- // If possible try to move spilling position backwards to loop header
- // inside the current range. This will reduce number of memory moves on
- // the back edge.
- LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex(
- loop_header->first_instruction_index());
-
- if (range->Covers(loop_start)) {
- if (prev_use == nullptr || prev_use->pos() < loop_start) {
- // No register beneficial use inside the loop before the pos.
- pos = loop_start;
- }
- }
- // Try hoisting out to an outer loop.
- loop_header = GetContainingLoop(code(), loop_header);
- }
+ // Try hoisting out to an outer loop.
+ loop_header = GetContainingLoop(code(), loop_header);
}
return pos;
}
@@ -3761,9 +3546,7 @@ void LinearScanAllocator::AllocateRegisters() {
// those. Not only does this produce a potentially bad assignment, it also
// breaks with the invariant that we undo spills that happen in deferred code
// when crossing a deferred/non-deferred boundary.
- while (!unhandled_live_ranges().empty() ||
- (data()->is_turbo_control_flow_aware_allocation() &&
- last_block < max_blocks)) {
+ while (!unhandled_live_ranges().empty() || last_block < max_blocks) {
data()->tick_counter()->TickAndMaybeEnterSafepoint();
LiveRange* current = unhandled_live_ranges().empty()
? nullptr
@@ -3773,160 +3556,155 @@ void LinearScanAllocator::AllocateRegisters() {
#ifdef DEBUG
allocation_finger_ = position;
#endif
- if (data()->is_turbo_control_flow_aware_allocation()) {
- // Splintering is not supported.
- CHECK(!data()->is_turbo_preprocess_ranges());
- // Check whether we just moved across a block boundary. This will trigger
- // for the first range that is past the current boundary.
- if (position >= next_block_boundary) {
- TRACE("Processing boundary at %d leaving %d\n",
- next_block_boundary.value(), last_block.ToInt());
-
- // Forward state to before block boundary
- LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
- ForwardStateTo(end_of_block);
-
- // Remember this state.
- InstructionBlock* current_block = data()->code()->GetInstructionBlock(
- next_block_boundary.ToInstructionIndex());
-
- // Store current spill state (as the state at end of block). For
- // simplicity, we store the active ranges, e.g., the live ranges that
- // are not spilled.
- data()->RememberSpillState(last_block, active_live_ranges());
-
- // Only reset the state if this was not a direct fallthrough. Otherwise
- // control flow resolution will get confused (it does not expect changes
- // across fallthrough edges.).
- bool fallthrough = (current_block->PredecessorCount() == 1) &&
- current_block->predecessors()[0].IsNext(
- current_block->rpo_number());
-
- // When crossing a deferred/non-deferred boundary, we have to load or
- // remove the deferred fixed ranges from inactive.
- if ((spill_mode == SpillMode::kSpillDeferred) !=
- current_block->IsDeferred()) {
- // Update spill mode.
- spill_mode = current_block->IsDeferred()
- ? SpillMode::kSpillDeferred
- : SpillMode::kSpillAtDefinition;
-
- ForwardStateTo(next_block_boundary);
+ // Check whether we just moved across a block boundary. This will trigger
+ // for the first range that is past the current boundary.
+ if (position >= next_block_boundary) {
+ TRACE("Processing boundary at %d leaving %d\n",
+ next_block_boundary.value(), last_block.ToInt());
+
+    // Forward the state to just before the block boundary.
+ LifetimePosition end_of_block = next_block_boundary.PrevStart().End();
+ ForwardStateTo(end_of_block);
+
+ // Remember this state.
+ InstructionBlock* current_block = data()->code()->GetInstructionBlock(
+ next_block_boundary.ToInstructionIndex());
+
+    // Store the current spill state (as the state at the end of the block).
+    // For simplicity, we store the active ranges, i.e., the live ranges that
+    // are not spilled.
+ data()->RememberSpillState(last_block, active_live_ranges());
+
+ // Only reset the state if this was not a direct fallthrough. Otherwise
+ // control flow resolution will get confused (it does not expect changes
+    // across fallthrough edges).
+ bool fallthrough =
+ (current_block->PredecessorCount() == 1) &&
+ current_block->predecessors()[0].IsNext(current_block->rpo_number());
+
+ // When crossing a deferred/non-deferred boundary, we have to load or
+ // remove the deferred fixed ranges from inactive.
+ if ((spill_mode == SpillMode::kSpillDeferred) !=
+ current_block->IsDeferred()) {
+ // Update spill mode.
+ spill_mode = current_block->IsDeferred()
+ ? SpillMode::kSpillDeferred
+ : SpillMode::kSpillAtDefinition;
+
+ ForwardStateTo(next_block_boundary);
#ifdef DEBUG
- // Allow allocation at current position.
- allocation_finger_ = next_block_boundary;
+ // Allow allocation at current position.
+ allocation_finger_ = next_block_boundary;
#endif
- UpdateDeferredFixedRanges(spill_mode, current_block);
- }
+ UpdateDeferredFixedRanges(spill_mode, current_block);
+ }
- // Allocation relies on the fact that each non-deferred block has at
- // least one non-deferred predecessor. Check this invariant here.
- DCHECK_IMPLIES(!current_block->IsDeferred(),
- HasNonDeferredPredecessor(current_block));
+ // Allocation relies on the fact that each non-deferred block has at
+ // least one non-deferred predecessor. Check this invariant here.
+ DCHECK_IMPLIES(!current_block->IsDeferred(),
+ HasNonDeferredPredecessor(current_block));
- if (!fallthrough) {
+ if (!fallthrough) {
#ifdef DEBUG
- // Allow allocation at current position.
- allocation_finger_ = next_block_boundary;
+ // Allow allocation at current position.
+ allocation_finger_ = next_block_boundary;
#endif
- // We are currently at next_block_boundary - 1. Move the state to the
- // actual block boundary position. In particular, we have to
- // reactivate inactive ranges so that they get rescheduled for
- // allocation if they were not live at the predecessors.
- ForwardStateTo(next_block_boundary);
-
- RangeWithRegisterSet to_be_live(data()->allocation_zone());
-
- // If we end up deciding to use the state of the immediate
- // predecessor, it is better not to perform a change. It would lead to
- // the same outcome anyway.
- // This may never happen on boundaries between deferred and
- // non-deferred code, as we rely on explicit respill to ensure we
- // spill at definition.
- bool no_change_required = false;
-
- auto pick_state_from = [this, current_block](
- RpoNumber pred,
- RangeWithRegisterSet* to_be_live) -> bool {
- TRACE("Using information from B%d\n", pred.ToInt());
- // If this is a fall-through that is not across a deferred
- // boundary, there is nothing to do.
- bool is_noop = pred.IsNext(current_block->rpo_number());
- if (!is_noop) {
- auto& spill_state = data()->GetSpillState(pred);
- TRACE("Not a fallthrough. Adding %zu elements...\n",
- spill_state.size());
- LifetimePosition pred_end =
- LifetimePosition::GapFromInstructionIndex(
- this->code()->InstructionBlockAt(pred)->code_end());
- for (const auto range : spill_state) {
- // Filter out ranges that were split or had their register
- // stolen by backwards working spill heuristics. These have
- // been spilled after the fact, so ignore them.
- if (range->End() < pred_end || !range->HasRegisterAssigned())
- continue;
- to_be_live->emplace(range);
- }
- }
- return is_noop;
- };
-
- // Multiple cases here:
- // 1) We have a single predecessor => this is a control flow split, so
- // just restore the predecessor state.
- // 2) We have two predecessors => this is a conditional, so break ties
- // based on what to do based on forward uses, trying to benefit
- // the same branch if in doubt (make one path fast).
- // 3) We have many predecessors => this is a switch. Compute union
- // based on majority, break ties by looking forward.
- if (current_block->PredecessorCount() == 1) {
- TRACE("Single predecessor for B%d\n",
- current_block->rpo_number().ToInt());
- no_change_required =
- pick_state_from(current_block->predecessors()[0], &to_be_live);
- } else if (current_block->PredecessorCount() == 2) {
- TRACE("Two predecessors for B%d\n",
- current_block->rpo_number().ToInt());
- // If one of the branches does not contribute any information,
- // e.g. because it is deferred or a back edge, we can short cut
- // here right away.
- RpoNumber chosen_predecessor = RpoNumber::Invalid();
- if (!ConsiderBlockForControlFlow(
- current_block, current_block->predecessors()[0])) {
- chosen_predecessor = current_block->predecessors()[1];
- } else if (!ConsiderBlockForControlFlow(
- current_block, current_block->predecessors()[1])) {
- chosen_predecessor = current_block->predecessors()[0];
- } else {
- chosen_predecessor = ChooseOneOfTwoPredecessorStates(
- current_block, next_block_boundary);
+ // We are currently at next_block_boundary - 1. Move the state to the
+ // actual block boundary position. In particular, we have to
+ // reactivate inactive ranges so that they get rescheduled for
+ // allocation if they were not live at the predecessors.
+ ForwardStateTo(next_block_boundary);
+
+ RangeWithRegisterSet to_be_live(data()->allocation_zone());
+
+ // If we end up deciding to use the state of the immediate
+ // predecessor, it is better not to perform a change. It would lead to
+ // the same outcome anyway.
+ // This may never happen on boundaries between deferred and
+ // non-deferred code, as we rely on explicit respill to ensure we
+ // spill at definition.
+ bool no_change_required = false;
+
+ auto pick_state_from = [this, current_block](
+ RpoNumber pred,
+ RangeWithRegisterSet* to_be_live) -> bool {
+ TRACE("Using information from B%d\n", pred.ToInt());
+ // If this is a fall-through that is not across a deferred
+ // boundary, there is nothing to do.
+ bool is_noop = pred.IsNext(current_block->rpo_number());
+ if (!is_noop) {
+ auto& spill_state = data()->GetSpillState(pred);
+ TRACE("Not a fallthrough. Adding %zu elements...\n",
+ spill_state.size());
+ LifetimePosition pred_end =
+ LifetimePosition::GapFromInstructionIndex(
+ this->code()->InstructionBlockAt(pred)->code_end());
+ for (const auto range : spill_state) {
+ // Filter out ranges that were split or had their register
+ // stolen by backwards working spill heuristics. These have
+ // been spilled after the fact, so ignore them.
+ if (range->End() < pred_end || !range->HasRegisterAssigned())
+ continue;
+ to_be_live->emplace(range);
}
- no_change_required =
- pick_state_from(chosen_predecessor, &to_be_live);
-
+ }
+ return is_noop;
+ };
+
+ // Multiple cases here:
+ // 1) We have a single predecessor => this is a control flow split, so
+ // just restore the predecessor state.
+      // 2) We have two predecessors => this is a conditional, so break ties
+      //    based on forward uses, trying to benefit the same branch if in
+      //    doubt (make one path fast).
+ // 3) We have many predecessors => this is a switch. Compute union
+ // based on majority, break ties by looking forward.
+ if (current_block->PredecessorCount() == 1) {
+ TRACE("Single predecessor for B%d\n",
+ current_block->rpo_number().ToInt());
+ no_change_required =
+ pick_state_from(current_block->predecessors()[0], &to_be_live);
+ } else if (current_block->PredecessorCount() == 2) {
+ TRACE("Two predecessors for B%d\n",
+ current_block->rpo_number().ToInt());
+ // If one of the branches does not contribute any information,
+      // e.g. because it is deferred or a back edge, we can take a shortcut
+      // here right away.
+ RpoNumber chosen_predecessor = RpoNumber::Invalid();
+ if (!ConsiderBlockForControlFlow(current_block,
+ current_block->predecessors()[0])) {
+ chosen_predecessor = current_block->predecessors()[1];
+ } else if (!ConsiderBlockForControlFlow(
+ current_block, current_block->predecessors()[1])) {
+ chosen_predecessor = current_block->predecessors()[0];
} else {
- // Merge at the end of, e.g., a switch.
- ComputeStateFromManyPredecessors(current_block, &to_be_live);
+ chosen_predecessor = ChooseOneOfTwoPredecessorStates(
+ current_block, next_block_boundary);
}
+ no_change_required = pick_state_from(chosen_predecessor, &to_be_live);
- if (!no_change_required) {
- SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
- ReloadLiveRanges(to_be_live, next_block_boundary);
- }
+ } else {
+ // Merge at the end of, e.g., a switch.
+ ComputeStateFromManyPredecessors(current_block, &to_be_live);
+ }
+
+ if (!no_change_required) {
+ SpillNotLiveRanges(&to_be_live, next_block_boundary, spill_mode);
+ ReloadLiveRanges(to_be_live, next_block_boundary);
}
- // Update block information
- last_block = current_block->rpo_number();
- next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
- current_block->last_instruction_index())
- .NextFullStart();
-
- // We might have created new unhandled live ranges, so cycle around the
- // loop to make sure we pick the top most range in unhandled for
- // processing.
- continue;
}
+ // Update block information
+ last_block = current_block->rpo_number();
+ next_block_boundary = LifetimePosition::InstructionFromInstructionIndex(
+ current_block->last_instruction_index())
+ .NextFullStart();
+
+ // We might have created new unhandled live ranges, so cycle around the
+ // loop to make sure we pick the top most range in unhandled for
+ // processing.
+ continue;
}
DCHECK_NOT_NULL(current);
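
The three-way predecessor handling above (single predecessor, two-way conditional, many-way switch) can be summarized with a small standalone sketch over plain sets of "registers kept live". Everything below (RegSet, MajorityVote, StateAtBlockStart) is invented for illustration, and the two-predecessor tie-break is simplified to "prefer the non-deferred side" rather than the forward-use heuristic the allocator actually applies.

#include <cstddef>
#include <set>
#include <vector>

using RegSet = std::set<int>;  // registers assumed to be in a register at a block end

// For switch-like merges: keep a register only if a strict majority of the
// predecessors kept it, mirroring "compute union based on majority".
RegSet MajorityVote(const std::vector<RegSet>& pred_states) {
  RegSet candidates, result;
  for (const RegSet& s : pred_states) candidates.insert(s.begin(), s.end());
  for (int reg : candidates) {
    std::size_t votes = 0;
    for (const RegSet& s : pred_states) votes += s.count(reg);
    if (2 * votes > pred_states.size()) result.insert(reg);
  }
  return result;
}

// Mirrors the case split at a block boundary: one predecessor -> copy its
// state; two predecessors -> pick one side (simplified here); more -> vote.
RegSet StateAtBlockStart(const std::vector<RegSet>& pred_states,
                         const std::vector<bool>& pred_is_deferred) {
  if (pred_states.size() == 1) return pred_states[0];
  if (pred_states.size() == 2) {
    return pred_is_deferred[0] ? pred_states[1] : pred_states[0];
  }
  return MajorityVote(pred_states);
}

int main() {
  std::vector<RegSet> preds = {{1, 2}, {1}, {1, 3}};
  std::vector<bool> deferred = {false, false, false};
  return StateAtBlockStart(preds, deferred).count(1) == 1 ? 0 : 1;  // reg 1 kept by all
}
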
@@ -3952,28 +3730,6 @@ void LinearScanAllocator::AllocateRegisters() {
}
}
-bool LinearScanAllocator::TrySplitAndSpillSplinter(LiveRange* range) {
- DCHECK(!data()->is_turbo_control_flow_aware_allocation());
- DCHECK(range->TopLevel()->IsSplinter());
- // If we can spill the whole range, great. Otherwise, split above the
- // first use needing a register and spill the top part.
- const UsePosition* next_reg = range->NextRegisterPosition(range->Start());
- if (next_reg == nullptr) {
- Spill(range, SpillMode::kSpillAtDefinition);
- return true;
- } else if (range->FirstHintPosition() == nullptr) {
- // If there was no hint, but we have a use position requiring a
- // register, apply the hot path heuristics.
- return false;
- } else if (next_reg->pos().PrevStart() > range->Start()) {
- LiveRange* tail = SplitRangeAt(range, next_reg->pos().PrevStart());
- AddToUnhandled(tail);
- Spill(range, SpillMode::kSpillAtDefinition);
- return true;
- }
- return false;
-}
-
void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
int reg) {
data()->MarkAllocated(range->representation(), reg);
@@ -4198,49 +3954,17 @@ void LinearScanAllocator::FindFreeRegistersForRange(
// High-level register allocation summary:
//
-// For regular, or hot (i.e. not splinter) ranges, we attempt to first
-// allocate first the preferred (hint) register. If that is not possible,
-// we find a register that's free, and allocate that. If that's not possible,
-// we search for a register to steal from a range that was allocated. The
-// goal is to optimize for throughput by avoiding register-to-memory
-// moves, which are expensive.
-//
-// For splinters, the goal is to minimize the number of moves. First we try
-// to allocate the preferred register (more discussion follows). Failing that,
-// we bail out and spill as far as we can, unless the first use is at start,
-// case in which we apply the same behavior as we do for regular ranges.
-// If there is no hint, we apply the hot-path behavior.
-//
-// For the splinter, the hint register may come from:
-//
-// - the hot path (we set it at splintering time with SetHint). In this case, if
-// we cannot offer the hint register, spilling is better because it's at most
-// 1 move, while trying to find and offer another register is at least 1 move.
-//
-// - a constraint. If we cannot offer that register, it's because there is some
-// interference. So offering the hint register up to the interference would
-// result
-// in a move at the interference, plus a move to satisfy the constraint. This is
-// also the number of moves if we spill, with the potential of the range being
-// already spilled and thus saving a move (the spill).
-// Note that this can only be an input constraint, if it were an output one,
-// the range wouldn't be a splinter because it means it'd be defined in a
-// deferred
-// block, and we don't mark those as splinters (they live in deferred blocks
-// only).
-//
-// - a phi. The same analysis as in the case of the input constraint applies.
-//
+// We attempt to first allocate the preferred (hint) register. If that is not
+// possible, we find a register that's free, and allocate that. If that's not
+// possible, we search for a register to steal from a range that was allocated.
+// The goal is to optimize for throughput by avoiding register-to-memory moves,
+// which are expensive.
void LinearScanAllocator::ProcessCurrentRange(LiveRange* current,
SpillMode spill_mode) {
EmbeddedVector<LifetimePosition, RegisterConfiguration::kMaxRegisters>
free_until_pos;
FindFreeRegistersForRange(current, free_until_pos);
if (!TryAllocatePreferredReg(current, free_until_pos)) {
- if (current->TopLevel()->IsSplinter()) {
- DCHECK(!data()->is_turbo_control_flow_aware_allocation());
- if (TrySplitAndSpillSplinter(current)) return;
- }
if (!TryAllocateFreeReg(current, free_until_pos)) {
AllocateBlockedReg(current, spill_mode);
}
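
The summary above boils down to a three-step priority cascade. A rough standalone sketch of that order follows; the names (Allocate, kNoRegister, is_free) are invented for illustration and are not V8 functions.

#include <array>
#include <optional>

constexpr int kNumRegs = 4;
constexpr int kNoRegister = -1;

struct Range {
  std::optional<int> hint;  // preferred register, if any
};

int Allocate(const Range& range, const std::array<bool, kNumRegs>& is_free) {
  // 1) The preferred (hint) register first: it avoids a move altogether.
  if (range.hint && is_free[*range.hint]) return *range.hint;
  // 2) Otherwise any free register: still no register-to-memory traffic.
  for (int r = 0; r < kNumRegs; ++r) {
    if (is_free[r]) return r;
  }
  // 3) Otherwise the caller must steal a register from an already allocated
  //    range (AllocateBlockedReg in the real code), which may introduce spills.
  return kNoRegister;
}

int main() {
  std::array<bool, kNumRegs> is_free = {false, true, false, true};
  Range r{2};  // hint register 2 is taken...
  return Allocate(r, is_free) == 1 ? 0 : 1;  // ...so the first free one (r1) wins
}
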
@@ -4725,30 +4449,26 @@ OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
: data_(data) {}
void OperandAssigner::DecideSpillingMode() {
- if (data()->is_turbo_control_flow_aware_allocation()) {
- for (auto range : data()->live_ranges()) {
- data()->tick_counter()->TickAndMaybeEnterSafepoint();
- int max_blocks = data()->code()->InstructionBlockCount();
- if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
- // If the range is spilled only in deferred blocks and starts in
- // a non-deferred block, we transition its representation here so
- // that the LiveRangeConnector processes them correctly. If,
- // however, they start in a deferred block, we uograde them to
- // spill at definition, as that definition is in a deferred block
- // anyway. While this is an optimization, the code in LiveRangeConnector
- // relies on it!
- if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
- TRACE("Live range %d is spilled and alive in deferred code only\n",
- range->vreg());
- range->TransitionRangeToSpillAtDefinition();
- } else {
- TRACE(
- "Live range %d is spilled deferred code only but alive outside\n",
+ for (auto range : data()->live_ranges()) {
+ data()->tick_counter()->TickAndMaybeEnterSafepoint();
+ int max_blocks = data()->code()->InstructionBlockCount();
+ if (range != nullptr && range->IsSpilledOnlyInDeferredBlocks(data())) {
+    // If the range is spilled only in deferred blocks and starts in
+    // a non-deferred block, we transition its representation here so
+    // that the LiveRangeConnector processes it correctly. If,
+    // however, it starts in a deferred block, we upgrade it to
+    // spill at definition, as that definition is in a deferred block
+    // anyway. While this is an optimization, the code in LiveRangeConnector
+    // relies on it!
+ if (GetInstructionBlock(data()->code(), range->Start())->IsDeferred()) {
+ TRACE("Live range %d is spilled and alive in deferred code only\n",
range->vreg());
- DCHECK(data()->is_turbo_control_flow_aware_allocation());
- range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
- max_blocks);
- }
+ range->TransitionRangeToSpillAtDefinition();
+ } else {
+        TRACE("Live range %d is spilled in deferred code only but alive outside\n",
+ range->vreg());
+ range->TransitionRangeToDeferredSpill(data()->allocation_zone(),
+ max_blocks);
}
}
}
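
For reference, the decision DecideSpillingMode makes for ranges that are spilled only in deferred blocks reduces to the sketch below; the enum and struct are illustrative stand-ins, not V8 types.

enum class SpillKind { kSpillAtDefinition, kDeferredSpill };

struct RangeSummary {
  bool starts_in_deferred_block;
};

SpillKind DecideForDeferredOnlySpills(const RangeSummary& range) {
  // Defined in deferred code anyway: spilling at the definition costs nothing
  // on the hot path, and LiveRangeConnector can treat it as a normal spill.
  if (range.starts_in_deferred_block) return SpillKind::kSpillAtDefinition;
  // Defined in hot code but only spilled in deferred code: keep the store out
  // of the hot path and materialize the spill lazily in the deferred blocks.
  return SpillKind::kDeferredSpill;
}

int main() {
  return DecideForDeferredOnlySpills({false}) == SpillKind::kDeferredSpill ? 0 : 1;
}
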
diff --git a/deps/v8/src/compiler/backend/register-allocator.h b/deps/v8/src/compiler/backend/register-allocator.h
index a9dc2900f1..87c0afbcfc 100644
--- a/deps/v8/src/compiler/backend/register-allocator.h
+++ b/deps/v8/src/compiler/backend/register-allocator.h
@@ -175,11 +175,7 @@ class LifetimePosition final {
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos);
-enum class RegisterAllocationFlag : unsigned {
- kTurboControlFlowAwareAllocation = 1 << 0,
- kTurboPreprocessRanges = 1 << 1,
- kTraceAllocation = 1 << 2
-};
+enum class RegisterAllocationFlag : unsigned { kTraceAllocation = 1 << 0 };
using RegisterAllocationFlags = base::Flags<RegisterAllocationFlag>;
@@ -210,14 +206,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
// regular code (kSpillAtDefinition).
enum SpillMode { kSpillAtDefinition, kSpillDeferred };
- bool is_turbo_control_flow_aware_allocation() const {
- return flags_ & RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
- }
-
- bool is_turbo_preprocess_ranges() const {
- return flags_ & RegisterAllocationFlag::kTurboPreprocessRanges;
- }
-
bool is_trace_alloc() {
return flags_ & RegisterAllocationFlag::kTraceAllocation;
}
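
The trimmed-down flag enum above is still queried through base::Flags, as in is_trace_alloc(). A minimal sketch of that scoped-enum flag pattern follows; it is not the real base::Flags, which supports more operators.

enum class RegAllocFlag : unsigned { kTraceAllocation = 1u << 0 };

// Thin wrapper so that `flags & RegAllocFlag::kFoo` reads naturally and is
// truthy exactly when the flag is set, as in is_trace_alloc() above.
class RegAllocFlags {
 public:
  constexpr RegAllocFlags() = default;
  constexpr RegAllocFlags(RegAllocFlag f) : bits_(static_cast<unsigned>(f)) {}
  constexpr unsigned operator&(RegAllocFlag f) const {
    return bits_ & static_cast<unsigned>(f);
  }

 private:
  unsigned bits_ = 0;
};

static_assert((RegAllocFlags(RegAllocFlag::kTraceAllocation) &
               RegAllocFlag::kTraceAllocation) != 0,
              "kTraceAllocation is queryable");

int main() { return 0; }
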
@@ -615,7 +603,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos) const;
- // Splitting primitive used by both splitting and splintering members.
+ // Splitting primitive used by splitting members.
// Performs the split, but does not link the resulting ranges.
// The given position must follow the start of the range.
// All uses following the given position will be moved from this
@@ -708,7 +696,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
using ControlFlowRegisterHint = base::BitField<uint8_t, 22, 6>;
// Bits 28-31 are used by TopLevelLiveRange.
- // Unique among children and splinters of the same virtual register.
+ // Unique among children of the same virtual register.
int relative_id_;
uint32_t bits_;
UseInterval* last_interval_;
@@ -720,8 +708,6 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
mutable UseInterval* current_interval_;
// This is used as a cache, it doesn't affect correctness.
mutable UsePosition* last_processed_use_;
- // Cache the last position splintering stopped at.
- mutable UsePosition* splitting_pointer_;
// This is used as a cache in BuildLiveRanges and during register allocation.
UsePosition* current_hint_position_;
LiveRangeBundle* bundle_ = nullptr;
@@ -853,16 +839,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start, bool trace_alloc);
- // Detaches between start and end, and attributes the resulting range to
- // result.
- // The current range is pointed to as "splintered_from". No parent/child
- // relationship is established between this and result.
- void Splinter(LifetimePosition start, LifetimePosition end, Zone* zone);
-
- // Assuming other was splintered from this range, embeds other and its
- // children as part of the children sequence of this range.
- void Merge(TopLevelLiveRange* other, Zone* zone);
-
// Spill range management.
void SetSpillRange(SpillRange* spill_range);
@@ -963,19 +939,12 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
}
}
- TopLevelLiveRange* splintered_from() const { return splintered_from_; }
- bool IsSplinter() const { return splintered_from_ != nullptr; }
bool MayRequireSpillRange() const {
- DCHECK(!IsSplinter());
return !HasSpillOperand() && spill_range_ == nullptr;
}
void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
int vreg() const { return vreg_; }
-#if DEBUG
- int debug_virt_reg() const;
-#endif
-
void Verify() const;
void VerifyChildrenInOrder() const;
@@ -985,19 +954,13 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// if you call it with a non-decreasing sequence of positions.
LiveRange* GetChildCovers(LifetimePosition pos);
- int GetNextChildId() {
- return IsSplinter() ? splintered_from()->GetNextChildId()
- : ++last_child_id_;
- }
+ int GetNextChildId() { return ++last_child_id_; }
int GetMaxChildCount() const { return last_child_id_ + 1; }
bool IsSpilledOnlyInDeferredBlocks(
const TopTierRegisterAllocationData* data) const {
- if (data->is_turbo_control_flow_aware_allocation()) {
- return spill_type() == SpillType::kDeferredSpillRange;
- }
- return spilled_in_deferred_blocks_;
+ return spill_type() == SpillType::kDeferredSpillRange;
}
struct SpillMoveInsertionList;
@@ -1007,17 +970,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
DCHECK(!IsSpilledOnlyInDeferredBlocks(data));
return spill_move_insertion_locations_;
}
- TopLevelLiveRange* splinter() const { return splinter_; }
- void SetSplinter(TopLevelLiveRange* splinter) {
- DCHECK_NULL(splinter_);
- DCHECK_NOT_NULL(splinter);
-
- splinter_ = splinter;
- splinter->relative_id_ = GetNextChildId();
- splinter->set_spill_type(spill_type());
- splinter->SetSplinteredFrom(this);
- if (bundle_ != nullptr) splinter->set_bundle(bundle_);
- }
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
@@ -1056,7 +1008,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
private:
friend class LiveRange;
- void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
// If spill type is kSpillRange, then this value indicates whether we've
// chosen to spill at the definition or at some later points.
@@ -1076,7 +1027,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int vreg_;
int last_child_id_;
- TopLevelLiveRange* splintered_from_;
union {
// Correct value determined by spill_type()
InstructionOperand* spill_operand_;
@@ -1096,7 +1046,6 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
int spill_start_index_;
UsePosition* last_pos_;
LiveRange* last_child_covers_;
- TopLevelLiveRange* splinter_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -1310,11 +1259,8 @@ class LiveRangeBuilder final : public ZoneObject {
spill_mode);
}
SpillMode SpillModeForBlock(const InstructionBlock* block) const {
- if (data()->is_turbo_control_flow_aware_allocation()) {
- return block->IsDeferred() ? SpillMode::kSpillDeferred
- : SpillMode::kSpillAtDefinition;
- }
- return SpillMode::kSpillAtDefinition;
+ return block->IsDeferred() ? SpillMode::kSpillDeferred
+ : SpillMode::kSpillAtDefinition;
}
TopTierRegisterAllocationData* const data_;
ZoneMap<InstructionOperand*, UsePosition*> phi_hints_;
@@ -1529,7 +1475,6 @@ class LinearScanAllocator final : public RegisterAllocator {
Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
void AllocateBlockedReg(LiveRange* range, SpillMode spill_mode);
- bool TrySplitAndSpillSplinter(LiveRange* range);
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos, SpillMode spill_mode);
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 12b1167d4d..f3ab25630f 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -643,184 +643,184 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ MovFromFloatResult(i.OutputDoubleRegister()); \
} while (0)
-#define ASSEMBLE_DOUBLE_MAX() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ cdbr(left_reg, right_reg); \
- __ bunordered(&check_nan_left, Label::kNear); \
- __ beq(&check_zero); \
- __ bge(&return_left, Label::kNear); \
- __ b(&return_right, Label::kNear); \
- \
- __ bind(&check_zero); \
- __ lzdr(kDoubleRegZero); \
- __ cdbr(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left, Label::kNear); \
- /* At this point, both left and right are either 0 or -0. */ \
- /* N.B. The following works because +0 + -0 == +0 */ \
- /* For max we want logical-and of sign bit: (L + R) */ \
- __ ldr(result_reg, left_reg); \
- __ adbr(result_reg, right_reg); \
- __ b(&done, Label::kNear); \
- \
- __ bind(&check_nan_left); \
- __ cdbr(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left, Label::kNear); \
- \
- __ bind(&return_right); \
- if (right_reg != result_reg) { \
- __ ldr(result_reg, right_reg); \
- } \
- __ b(&done, Label::kNear); \
- \
- __ bind(&return_left); \
- if (left_reg != result_reg) { \
- __ ldr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_DOUBLE_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_zero, return_left, return_right, return_nan, done; \
+ __ cdbr(left_reg, right_reg); \
+ __ bunordered(&return_nan, Label::kNear); \
+ __ beq(&check_zero); \
+ __ bge(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cdbr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For max we want logical-and of sign bit: (L + R) */ \
+ __ ldr(result_reg, left_reg); \
+ __ adbr(result_reg, right_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, adbr propagates the appropriate one.*/ \
+ __ adbr(left_reg, right_reg); \
+ __ b(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_DOUBLE_MIN() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ cdbr(left_reg, right_reg); \
- __ bunordered(&check_nan_left, Label::kNear); \
- __ beq(&check_zero); \
- __ ble(&return_left, Label::kNear); \
- __ b(&return_right, Label::kNear); \
- \
- __ bind(&check_zero); \
- __ lzdr(kDoubleRegZero); \
- __ cdbr(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left, Label::kNear); \
- /* At this point, both left and right are either 0 or -0. */ \
- /* N.B. The following works because +0 + -0 == +0 */ \
- /* For min we want logical-or of sign bit: -(-L + -R) */ \
- __ lcdbr(left_reg, left_reg); \
- __ ldr(result_reg, left_reg); \
- if (left_reg == right_reg) { \
- __ adbr(result_reg, right_reg); \
- } else { \
- __ sdbr(result_reg, right_reg); \
- } \
- __ lcdbr(result_reg, result_reg); \
- __ b(&done, Label::kNear); \
- \
- __ bind(&check_nan_left); \
- __ cdbr(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left, Label::kNear); \
- \
- __ bind(&return_right); \
- if (right_reg != result_reg) { \
- __ ldr(result_reg, right_reg); \
- } \
- __ b(&done, Label::kNear); \
- \
- __ bind(&return_left); \
- if (left_reg != result_reg) { \
- __ ldr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_DOUBLE_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_zero, return_left, return_right, return_nan, done; \
+ __ cdbr(left_reg, right_reg); \
+ __ bunordered(&return_nan, Label::kNear); \
+ __ beq(&check_zero); \
+ __ ble(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cdbr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For min we want logical-or of sign bit: -(-L + -R) */ \
+ __ lcdbr(left_reg, left_reg); \
+ __ ldr(result_reg, left_reg); \
+ if (left_reg == right_reg) { \
+ __ adbr(result_reg, right_reg); \
+ } else { \
+ __ sdbr(result_reg, right_reg); \
+ } \
+ __ lcdbr(result_reg, result_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, adbr propagates the appropriate one.*/ \
+ __ adbr(left_reg, right_reg); \
+ __ b(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_FLOAT_MAX() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ cebr(left_reg, right_reg); \
- __ bunordered(&check_nan_left, Label::kNear); \
- __ beq(&check_zero); \
- __ bge(&return_left, Label::kNear); \
- __ b(&return_right, Label::kNear); \
- \
- __ bind(&check_zero); \
- __ lzdr(kDoubleRegZero); \
- __ cebr(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left, Label::kNear); \
- /* At this point, both left and right are either 0 or -0. */ \
- /* N.B. The following works because +0 + -0 == +0 */ \
- /* For max we want logical-and of sign bit: (L + R) */ \
- __ ldr(result_reg, left_reg); \
- __ aebr(result_reg, right_reg); \
- __ b(&done, Label::kNear); \
- \
- __ bind(&check_nan_left); \
- __ cebr(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left, Label::kNear); \
- \
- __ bind(&return_right); \
- if (right_reg != result_reg) { \
- __ ldr(result_reg, right_reg); \
- } \
- __ b(&done, Label::kNear); \
- \
- __ bind(&return_left); \
- if (left_reg != result_reg) { \
- __ ldr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_FLOAT_MAX() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_zero, return_left, return_right, return_nan, done; \
+ __ cebr(left_reg, right_reg); \
+ __ bunordered(&return_nan, Label::kNear); \
+ __ beq(&check_zero); \
+ __ bge(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cebr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For max we want logical-and of sign bit: (L + R) */ \
+ __ ldr(result_reg, left_reg); \
+ __ aebr(result_reg, right_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, aebr propagates the appropriate one.*/ \
+ __ aebr(left_reg, right_reg); \
+ __ b(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_FLOAT_MIN() \
- do { \
- DoubleRegister left_reg = i.InputDoubleRegister(0); \
- DoubleRegister right_reg = i.InputDoubleRegister(1); \
- DoubleRegister result_reg = i.OutputDoubleRegister(); \
- Label check_nan_left, check_zero, return_left, return_right, done; \
- __ cebr(left_reg, right_reg); \
- __ bunordered(&check_nan_left, Label::kNear); \
- __ beq(&check_zero); \
- __ ble(&return_left, Label::kNear); \
- __ b(&return_right, Label::kNear); \
- \
- __ bind(&check_zero); \
- __ lzdr(kDoubleRegZero); \
- __ cebr(left_reg, kDoubleRegZero); \
- /* left == right != 0. */ \
- __ bne(&return_left, Label::kNear); \
- /* At this point, both left and right are either 0 or -0. */ \
- /* N.B. The following works because +0 + -0 == +0 */ \
- /* For min we want logical-or of sign bit: -(-L + -R) */ \
- __ lcebr(left_reg, left_reg); \
- __ ldr(result_reg, left_reg); \
- if (left_reg == right_reg) { \
- __ aebr(result_reg, right_reg); \
- } else { \
- __ sebr(result_reg, right_reg); \
- } \
- __ lcebr(result_reg, result_reg); \
- __ b(&done, Label::kNear); \
- \
- __ bind(&check_nan_left); \
- __ cebr(left_reg, left_reg); \
- /* left == NaN. */ \
- __ bunordered(&return_left, Label::kNear); \
- \
- __ bind(&return_right); \
- if (right_reg != result_reg) { \
- __ ldr(result_reg, right_reg); \
- } \
- __ b(&done, Label::kNear); \
- \
- __ bind(&return_left); \
- if (left_reg != result_reg) { \
- __ ldr(result_reg, left_reg); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_FLOAT_MIN() \
+ do { \
+ DoubleRegister left_reg = i.InputDoubleRegister(0); \
+ DoubleRegister right_reg = i.InputDoubleRegister(1); \
+ DoubleRegister result_reg = i.OutputDoubleRegister(); \
+ Label check_zero, return_left, return_right, return_nan, done; \
+ __ cebr(left_reg, right_reg); \
+ __ bunordered(&return_nan, Label::kNear); \
+ __ beq(&check_zero); \
+ __ ble(&return_left, Label::kNear); \
+ __ b(&return_right, Label::kNear); \
+ \
+ __ bind(&check_zero); \
+ __ lzdr(kDoubleRegZero); \
+ __ cebr(left_reg, kDoubleRegZero); \
+ /* left == right != 0. */ \
+ __ bne(&return_left, Label::kNear); \
+ /* At this point, both left and right are either 0 or -0. */ \
+ /* N.B. The following works because +0 + -0 == +0 */ \
+ /* For min we want logical-or of sign bit: -(-L + -R) */ \
+ __ lcebr(left_reg, left_reg); \
+ __ ldr(result_reg, left_reg); \
+ if (left_reg == right_reg) { \
+ __ aebr(result_reg, right_reg); \
+ } else { \
+ __ sebr(result_reg, right_reg); \
+ } \
+ __ lcebr(result_reg, result_reg); \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_nan); \
+ /* If left or right are NaN, aebr propagates the appropriate one.*/ \
+ __ aebr(left_reg, right_reg); \
+ __ b(&return_left, Label::kNear); \
+ \
+ __ bind(&return_right); \
+ if (right_reg != result_reg) { \
+ __ ldr(result_reg, right_reg); \
+ } \
+ __ b(&done, Label::kNear); \
+ \
+ __ bind(&return_left); \
+ if (left_reg != result_reg) { \
+ __ ldr(result_reg, left_reg); \
+ } \
+ __ bind(&done); \
} while (0)
//
// Only MRI mode for these instructions available
@@ -2581,7 +2581,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_LoadReverseSimd128: {
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
+ is_uint12(operand.offset())) {
__ vlbr(i.OutputSimd128Register(), operand, Condition(4));
} else {
__ lrvg(r0, operand);
@@ -2643,7 +2644,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
size_t index = 0;
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode, &index);
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
+ is_uint12(operand.offset())) {
__ vstbr(i.InputSimd128Register(index), operand, Condition(4));
} else {
__ vlgv(r0, i.InputSimd128Register(index), MemOperand(r0, 1),
@@ -3280,8 +3282,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register scratch_0 = r0;
Register scratch_1 = r1;
for (int i = 0; i < 2; i++) {
- __ vlgv(scratch_0, src0, MemOperand(r0, 0), Condition(3));
- __ vlgv(scratch_1, src1, MemOperand(r0, 0), Condition(3));
+ __ vlgv(scratch_0, src0, MemOperand(r0, i), Condition(3));
+ __ vlgv(scratch_1, src1, MemOperand(r0, i), Condition(3));
__ Mul64(scratch_0, scratch_1);
scratch_0 = r1;
scratch_1 = ip;
@@ -4176,7 +4178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#undef BINOP_EXTRACT
- case kS390_S8x16Shuffle: {
+ case kS390_I8x16Shuffle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -4196,7 +4198,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
break;
}
- case kS390_S8x16Swizzle: {
+ case kS390_I8x16Swizzle: {
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
@@ -4579,9 +4581,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue(ip);
- if (call_descriptor->PushArgumentCount()) {
- __ Push(kJavaScriptCallArgCountRegister);
- }
} else {
StackFrame::Type type = info()->GetOutputStackFrameType();
// TODO(mbrandy): Detect cases where ip is the entrypoint (for
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 693b555ae7..ab7973c089 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -365,8 +365,8 @@ namespace compiler {
V(S390_I8x16RoundingAverageU) \
V(S390_I8x16Abs) \
V(S390_I8x16BitMask) \
- V(S390_S8x16Shuffle) \
- V(S390_S8x16Swizzle) \
+ V(S390_I8x16Shuffle) \
+ V(S390_I8x16Swizzle) \
V(S390_V64x2AnyTrue) \
V(S390_V32x4AnyTrue) \
V(S390_V16x8AnyTrue) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index 1117ec5141..c0a854b7f1 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -311,8 +311,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_I8x16RoundingAverageU:
case kS390_I8x16Abs:
case kS390_I8x16BitMask:
- case kS390_S8x16Shuffle:
- case kS390_S8x16Swizzle:
+ case kS390_I8x16Shuffle:
+ case kS390_I8x16Swizzle:
case kS390_V64x2AnyTrue:
case kS390_V32x4AnyTrue:
case kS390_V16x8AnyTrue:
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index bb74050dcf..ee3e996169 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -2201,6 +2201,10 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
VisitFloat64Compare(this, node, &cont);
}
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::EmitPrepareArguments(
ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
Node* node) {
@@ -2820,7 +2824,7 @@ SIMD_VISIT_PMIN_MAX(F32x4Pmax)
#undef SIMD_VISIT_PMIN_MAX
#undef SIMD_TYPES
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
uint8_t* shuffle_p = &shuffle[0];
bool is_swizzle;
@@ -2841,7 +2845,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
}
shuffle_p = &shuffle_remapped[0];
#endif
- Emit(kS390_S8x16Shuffle, g.DefineAsRegister(node),
+ Emit(kS390_I8x16Shuffle, g.DefineAsRegister(node),
g.UseUniqueRegister(input0), g.UseUniqueRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p + 4)),
@@ -2849,9 +2853,9 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle_p + 12)));
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
S390OperandGenerator g(this);
- Emit(kS390_S8x16Swizzle, g.DefineAsRegister(node),
+ Emit(kS390_I8x16Swizzle, g.DefineAsRegister(node),
g.UseUniqueRegister(node->InputAt(0)),
g.UseUniqueRegister(node->InputAt(1)));
}
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index b84df81925..e0cf602b11 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -194,94 +194,6 @@ class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
XMMRegister const result_;
};
-class OutOfLineF32x4Min final : public OutOfLineCode {
- public:
- OutOfLineF32x4Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
- : OutOfLineCode(gen), result_(result), error_(error) {}
-
- void Generate() final {
- // |result| is the partial result, |kScratchDoubleReg| is the error.
- // propagate -0's and NaNs (possibly non-canonical) from the error.
- __ Orps(error_, result_);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpps(result_, error_, int8_t{3});
- __ Orps(error_, result_);
- __ Psrld(result_, byte{10});
- __ Andnps(result_, error_);
- }
-
- private:
- XMMRegister const result_;
- XMMRegister const error_;
-};
-
-class OutOfLineF64x2Min final : public OutOfLineCode {
- public:
- OutOfLineF64x2Min(CodeGenerator* gen, XMMRegister result, XMMRegister error)
- : OutOfLineCode(gen), result_(result), error_(error) {}
-
- void Generate() final {
- // |result| is the partial result, |kScratchDoubleReg| is the error.
- // propagate -0's and NaNs (possibly non-canonical) from the error.
- __ Orpd(error_, result_);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmppd(result_, error_, int8_t{3});
- __ Orpd(error_, result_);
- __ Psrlq(result_, 13);
- __ Andnpd(result_, error_);
- }
-
- private:
- XMMRegister const result_;
- XMMRegister const error_;
-};
-
-class OutOfLineF32x4Max final : public OutOfLineCode {
- public:
- OutOfLineF32x4Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
- : OutOfLineCode(gen), result_(result), error_(error) {}
-
- void Generate() final {
- // |result| is the partial result, |kScratchDoubleReg| is the error.
- // Propagate NaNs (possibly non-canonical).
- __ Orps(result_, error_);
- // Propagate sign errors and (subtle) quiet NaNs.
- __ Subps(result_, error_);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpps(error_, result_, int8_t{3});
- __ Psrld(error_, byte{10});
- __ Andnps(error_, result_);
- __ Movaps(result_, error_);
- }
-
- private:
- XMMRegister const result_;
- XMMRegister const error_;
-};
-
-class OutOfLineF64x2Max final : public OutOfLineCode {
- public:
- OutOfLineF64x2Max(CodeGenerator* gen, XMMRegister result, XMMRegister error)
- : OutOfLineCode(gen), result_(result), error_(error) {}
-
- void Generate() final {
- // |result| is the partial result, |kScratchDoubleReg| is the error.
- // Propagate NaNs (possibly non-canonical).
- __ Orpd(result_, error_);
- // Propagate sign errors and (subtle) quiet NaNs.
- __ Subpd(result_, error_);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmppd(error_, result_, int8_t{3});
- __ Psrlq(error_, byte{13});
- __ Andnpd(error_, result_);
- __ Movapd(result_, error_);
- }
-
- private:
- XMMRegister const result_;
- XMMRegister const error_;
-};
-
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
@@ -745,13 +657,33 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(TurboAssembler* assembler,
+void AdjustStackPointerForTailCall(Instruction* instr,
+ TurboAssembler* assembler, Linkage* linkage,
+ OptimizedCompilationInfo* info,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
- int current_sp_offset = state->GetSPToFPSlotCount() +
- StandardFrameConstants::kFixedSlotCountAboveFp;
- int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ int stack_slot_delta;
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+ // For this special tail-call mode, the callee has the same arguments and
+ // linkage as the caller, and arguments adapter frames must be preserved.
+    // Thus we simply have to reset the stack pointer register to its original
+ // value before frame construction.
+ // See also: AssembleConstructFrame.
+ DCHECK(!info->is_osr());
+ DCHECK_EQ(linkage->GetIncomingDescriptor()->CalleeSavedRegisters(), 0);
+ DCHECK_EQ(linkage->GetIncomingDescriptor()->CalleeSavedFPRegisters(), 0);
+ DCHECK_EQ(state->frame()->GetReturnSlotCount(), 0);
+ stack_slot_delta = (state->frame()->GetTotalFrameSlotCount() -
+ kReturnAddressStackSlotCount) *
+ -1;
+ DCHECK_LE(stack_slot_delta, 0);
+ } else {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ }
+
if (stack_slot_delta > 0) {
assembler->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
state->IncreaseSPDelta(stack_slot_delta);
@@ -778,12 +710,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
first_unused_stack_slot)) {
+ DCHECK(!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp));
X64OperandConverter g(this, instr);
for (auto move : pushes) {
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
@@ -801,14 +735,15 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
move->Eliminate();
}
}
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot, false);
+ AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ frame_access_state(), first_unused_stack_slot,
+ false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
- AdjustStackPointerForTailCall(tasm(), frame_access_state(),
- first_unused_stack_slot);
+ AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(),
+ frame_access_state(), first_unused_stack_slot);
}
// Check that {kJavaScriptCallCodeStartRegister} is correct.
@@ -912,12 +847,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchTailCallCodeObjectFromJSFunction:
- case kArchTailCallCodeObject: {
- if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
+ if (!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
}
+ V8_FALLTHROUGH;
+ case kArchTailCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2348,6 +2284,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ movq(Operand(rsp, slot * kSystemPointerSize), i.InputImmediate(0));
+ } else if (instr->InputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Movsd(Operand(rsp, slot * kSystemPointerSize),
+ i.InputDoubleRegister(0));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Movss(Operand(rsp, slot * kSystemPointerSize),
+ i.InputFloatRegister(0));
+ }
} else {
__ movq(Operand(rsp, slot * kSystemPointerSize), i.InputRegister(0));
}
@@ -2419,18 +2365,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minpd instruction doesn't propagate NaNs and -0's in its first
- // operand. Perform minpd in both orders and compare results. Handle the
- // unlikely case of discrepancies out of line.
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform minpd in both orders, merge the results, and adjust.
__ Movapd(kScratchDoubleReg, src1);
__ Minpd(kScratchDoubleReg, dst);
__ Minpd(dst, src1);
- // Most likely there is no difference and we're done.
- __ Xorpd(kScratchDoubleReg, dst);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- auto ool = zone()->New<OutOfLineF64x2Min>(this, dst, kScratchDoubleReg);
- __ j(not_zero, ool->entry());
- __ bind(ool->exit());
+      // Propagate -0's and NaNs, which may be non-canonical.
+ __ Orpd(kScratchDoubleReg, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
+ __ Orpd(kScratchDoubleReg, dst);
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, kScratchDoubleReg);
break;
}
case kX64F64x2Max: {
@@ -2438,17 +2384,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders and compare results. Handle the
- // unlikely case of discrepancies out of line.
+      // operand. Perform maxpd in both orders, merge the results, and adjust.
__ Movapd(kScratchDoubleReg, src1);
__ Maxpd(kScratchDoubleReg, dst);
__ Maxpd(dst, src1);
- // Most likely there is no difference and we're done.
- __ Xorpd(kScratchDoubleReg, dst);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- auto ool = zone()->New<OutOfLineF64x2Max>(this, dst, kScratchDoubleReg);
- __ j(not_zero, ool->entry());
- __ bind(ool->exit());
+ // Find discrepancies.
+ __ Xorpd(dst, kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ __ Orpd(kScratchDoubleReg, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ __ Subpd(kScratchDoubleReg, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
+ __ Psrlq(dst, 13);
+ __ Andnpd(dst, kScratchDoubleReg);
break;
}
case kX64F64x2Eq: {
@@ -2612,18 +2561,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src1 = i.InputSimd128Register(1),
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and -0's in its first
- // operand. Perform minps in both orders and compare results. Handle the
- // unlikely case of discrepancies out of line.
+ // The minps instruction doesn't propagate NaNs and +0's in its first
+      // operand. Perform minps in both orders, merge the results, and adjust.
__ Movaps(kScratchDoubleReg, src1);
__ Minps(kScratchDoubleReg, dst);
__ Minps(dst, src1);
- // Most likely there is no difference and we're done.
- __ Xorps(kScratchDoubleReg, dst);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- auto ool = zone()->New<OutOfLineF32x4Min>(this, dst, kScratchDoubleReg);
- __ j(not_zero, ool->entry());
- __ bind(ool->exit());
+      // Propagate -0's and NaNs, which may be non-canonical.
+ __ Orps(kScratchDoubleReg, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
+ __ Orps(kScratchDoubleReg, dst);
+ __ Psrld(dst, byte{10});
+ __ Andnps(dst, kScratchDoubleReg);
break;
}
case kX64F32x4Max: {
@@ -2631,17 +2580,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
dst = i.OutputSimd128Register();
DCHECK_EQ(dst, i.InputSimd128Register(0));
// The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders and compare results. Handle the
- // unlikely case of discrepancies out of line.
+      // operand. Perform maxps in both orders, merge the results, and adjust.
__ Movaps(kScratchDoubleReg, src1);
__ Maxps(kScratchDoubleReg, dst);
__ Maxps(dst, src1);
- // Most likely there is no difference and we're done.
- __ Xorps(kScratchDoubleReg, dst);
- __ Ptest(kScratchDoubleReg, kScratchDoubleReg);
- auto ool = zone()->New<OutOfLineF32x4Max>(this, dst, kScratchDoubleReg);
- __ j(not_zero, ool->entry());
- __ bind(ool->exit());
+ // Find discrepancies.
+ __ Xorps(dst, kScratchDoubleReg);
+ // Propagate NaNs, which may be non-canonical.
+ __ Orps(kScratchDoubleReg, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ __ Subps(kScratchDoubleReg, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
+ __ Psrld(dst, byte{10});
+ __ Andnps(dst, kScratchDoubleReg);
break;
}
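
The replacement sequences for kX64F64x2Min/Max and kX64F32x4Min/Max above fold what the out-of-line handlers used to do into straight-line code. The bit trick at the end is easiest to see on a single float lane; the sketch below models it in plain C++ (MinpsLane and the helpers are illustrative stand-ins, not V8 or SSE intrinsics), showing that non-NaN lanes pass through unchanged while NaN lanes collapse to a canonical quiet NaN with an empty payload.

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static uint32_t Bits(float f) { uint32_t b; std::memcpy(&b, &f, sizeof b); return b; }
static float Float(uint32_t b) { float f; std::memcpy(&f, &b, sizeof f); return f; }

// Scalar model of SSE minps: returns the second operand when the inputs are
// unordered or equal, so it neither propagates a NaN in the first operand nor
// prefers -0 over +0 on its own.
static float MinpsLane(float first, float second) {
  return (first < second) ? first : second;
}

// Per-lane model of the kX64F32x4Min sequence: minps in both orders, OR the
// results to propagate -0 and NaN, build an all-ones mask for NaN lanes with
// an unordered compare, then use (mask >> 10) and ANDN to clear the payload.
static float CanonicalMinLane(float a, float b) {
  uint32_t dst = Bits(MinpsLane(a, b));          // Minps(dst, src1)
  uint32_t scratch = Bits(MinpsLane(b, a));      // Minps(scratch, dst)
  scratch |= dst;                                // Orps(scratch, dst)
  uint32_t mask =                                // Cmpps(dst, scratch, UNORD)
      (std::isnan(Float(dst)) || std::isnan(Float(scratch))) ? 0xFFFFFFFFu : 0u;
  scratch |= mask;                               // Orps(scratch, dst)
  mask >>= 10;                                   // Psrld(dst, 10)
  return Float(~mask & scratch);                 // Andnps(dst, scratch)
}

int main() {
  assert(CanonicalMinLane(1.0f, 2.0f) == 1.0f);         // ordinary lanes untouched
  assert(std::signbit(CanonicalMinLane(0.0f, -0.0f)));  // -0 is propagated
  // NaN lanes become a quiet NaN whose payload is cleared (0xFFC00000 in this
  // scalar model; the hardware sequence leaves the sign non-deterministic).
  assert(Bits(CanonicalMinLane(1.0f, std::nanf(""))) == 0xFFC00000u);
  return 0;
}
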
case kX64F32x4Eq: {
@@ -3724,7 +3676,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Andnps(dst, i.InputSimd128Register(1));
break;
}
- case kX64S8x16Swizzle: {
+ case kX64I8x16Swizzle: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
XMMRegister mask = i.TempSimd128Register(0);
@@ -3737,7 +3689,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufb(dst, mask);
break;
}
- case kX64S8x16Shuffle: {
+ case kX64I8x16Shuffle: {
XMMRegister dst = i.OutputSimd128Register();
XMMRegister tmp_simd = i.TempSimd128Register(0);
if (instr->InputCount() == 5) { // only one input operand
@@ -4454,7 +4406,7 @@ static const int kQuadWordSize = 16;
} // namespace
void CodeGenerator::FinishFrame(Frame* frame) {
- auto call_descriptor = linkage()->GetIncomingDescriptor();
+ CallDescriptor* call_descriptor = linkage()->GetIncomingDescriptor();
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
if (saves_fp != 0) {
@@ -4492,9 +4444,6 @@ void CodeGenerator::AssembleConstructFrame() {
}
} else if (call_descriptor->IsJSFunctionCall()) {
__ Prologue();
- if (call_descriptor->PushArgumentCount()) {
- __ pushq(kJavaScriptCallArgCountRegister);
- }
} else {
__ StubPrologue(info()->GetOutputStackFrameType());
if (call_descriptor->IsWasmFunctionCall()) {
@@ -4610,7 +4559,7 @@ void CodeGenerator::AssembleConstructFrame() {
}
}
-void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
auto call_descriptor = linkage()->GetIncomingDescriptor();
// Restore registers.
@@ -4643,39 +4592,91 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
unwinding_info_writer_.MarkBlockWillExit();
- // Might need rcx for scratch if pop_size is too big or if there is a variable
- // pop count.
+ // We might need rcx and rdx for scratch.
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rcx.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & rdx.bit());
- size_t pop_size = call_descriptor->StackParameterCount() * kSystemPointerSize;
+ int parameter_count =
+ static_cast<int>(call_descriptor->StackParameterCount());
X64OperandConverter g(this, nullptr);
+ Register pop_reg = additional_pop_count->IsImmediate()
+ ? rcx
+ : g.ToRegister(additional_pop_count);
+ Register scratch_reg = pop_reg == rcx ? rdx : rcx;
+ Register argc_reg =
+ additional_pop_count->IsImmediate() ? pop_reg : scratch_reg;
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_count} == 0, it means it is a builtin with
+  // kDontAdaptArgumentsSentinel, which takes care of popping the JS
+  // arguments itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_count != 0;
+#else
+ const bool drop_jsargs = false;
+#endif
if (call_descriptor->IsCFunctionCall()) {
AssembleDeconstructFrame();
} else if (frame_access_state()->has_frame()) {
- if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
// Canonicalize JSFunction return sites for now.
if (return_label_.is_bound()) {
__ jmp(&return_label_);
return;
} else {
__ bind(&return_label_);
- AssembleDeconstructFrame();
}
- } else {
- AssembleDeconstructFrame();
}
+ if (drop_jsargs) {
+ // Get the actual argument count.
+ __ movq(argc_reg, Operand(rbp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
}
- if (pop->IsImmediate()) {
- pop_size += g.ToConstant(pop).ToInt32() * kSystemPointerSize;
- CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
- __ Ret(static_cast<int>(pop_size), rcx);
+ if (drop_jsargs) {
+ // In addition to the slots given by {additional_pop_count}, we must pop all
+ // arguments from the stack (including the receiver). This number of
+ // arguments is given by max(1 + argc_reg, parameter_count).
+ Label argc_reg_has_final_count;
+ // Exclude the receiver to simplify the computation. We'll account for it at
+ // the end.
+ int parameter_count_withouth_receiver = parameter_count - 1;
+ if (parameter_count_withouth_receiver != 0) {
+ __ cmpq(argc_reg, Immediate(parameter_count_withouth_receiver));
+ __ j(greater_equal, &argc_reg_has_final_count, Label::kNear);
+ __ movq(argc_reg, Immediate(parameter_count_withouth_receiver));
+ __ bind(&argc_reg_has_final_count);
+ }
+ // Add additional pop count.
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(pop_reg, argc_reg);
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ if (additional_count != 0) {
+ __ addq(pop_reg, Immediate(additional_count));
+ }
+ } else {
+ __ addq(pop_reg, argc_reg);
+ }
+ __ PopReturnAddressTo(scratch_reg);
+ __ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
+ kSystemPointerSize)); // Also pop the receiver.
+ // We use a return instead of a jump for better return address prediction.
+ __ PushReturnAddressFrom(scratch_reg);
+ __ Ret();
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ size_t pop_size = (parameter_count + additional_count) * kSystemPointerSize;
+ CHECK_LE(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
+ __ Ret(static_cast<int>(pop_size), scratch_reg);
} else {
- Register pop_reg = g.ToRegister(pop);
- Register scratch_reg = pop_reg == rcx ? rdx : rcx;
- __ popq(scratch_reg);
- __ leaq(rsp, Operand(rsp, pop_reg, times_8, static_cast<int>(pop_size)));
- __ jmp(scratch_reg);
+ int pop_size = static_cast<int>(parameter_count * kSystemPointerSize);
+ __ PopReturnAddressTo(scratch_reg);
+ __ leaq(rsp, Operand(rsp, pop_reg, times_system_pointer_size,
+ static_cast<int>(pop_size)));
+ __ PushReturnAddressFrom(scratch_reg);
+ __ Ret();
}
}
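Stripped of the register shuffling, the drop_jsargs path above pops max(1 + argc, parameter_count) + additional_pop_count stack slots after the return address. The sketch below mirrors that arithmetic, assuming (per the comments) that argc_reg excludes the receiver while StackParameterCount() includes it; the function name is illustrative, not V8 code:

    #include <algorithm>
    #include <cstdio>

    // Number of stack slots dropped after the return address, mirroring
    // max(argc, parameter_count - 1) + additional_pop_count + 1; the trailing
    // +1 corresponds to the kSystemPointerSize displacement in the leaq,
    // which also pops the receiver slot.
    int SlotsToDrop(int argc_without_receiver, int parameter_count,
                    int additional_pop_count) {
      int slots = std::max(argc_without_receiver, parameter_count - 1);
      return slots + additional_pop_count + 1;
    }

    int main() {
      // Declared with two parameters (plus receiver), called with five args:
      std::printf("%d\n", SlotsToDrop(5, 3, 0));  // 6: five arguments + receiver
      // Called with fewer arguments than declared: the declared count wins.
      std::printf("%d\n", SlotsToDrop(1, 3, 0));  // 3: two parameters + receiver
    }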
@@ -4923,15 +4924,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
case MoveType::kRegisterToStack: {
if (source->IsRegister()) {
Register src = g.ToRegister(source);
- __ pushq(src);
- frame_access_state()->IncreaseSPDelta(1);
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- kSystemPointerSize);
- __ movq(src, g.ToOperand(destination));
- frame_access_state()->IncreaseSPDelta(-1);
- __ popq(g.ToOperand(destination));
- unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
- -kSystemPointerSize);
+ Operand dst = g.ToOperand(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(src, dst);
+ __ movq(dst, kScratchRegister);
} else {
DCHECK(source->IsFPRegister());
XMMRegister src = g.ToDoubleRegister(source);
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 21f718b315..7312121a0a 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -325,8 +325,8 @@ namespace compiler {
V(X64S128Xor) \
V(X64S128Select) \
V(X64S128AndNot) \
- V(X64S8x16Swizzle) \
- V(X64S8x16Shuffle) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
V(X64S8x16LoadSplat) \
V(X64S16x8LoadSplat) \
V(X64S32x4LoadSplat) \
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 56ca9f1dc1..169753b40e 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -303,8 +303,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64V32x4AllTrue:
case kX64V16x8AnyTrue:
case kX64V16x8AllTrue:
- case kX64S8x16Swizzle:
- case kX64S8x16Shuffle:
+ case kX64I8x16Swizzle:
+ case kX64I8x16Shuffle:
case kX64S32x4Swizzle:
case kX64S32x4Shuffle:
case kX64S16x8Blend:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 7df4fcbebd..db212677ea 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -1297,9 +1297,9 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
}
}
-namespace {
-
-bool ZeroExtendsWord32ToWord64(Node* node) {
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ X64OperandGenerator g(this);
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) {
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
@@ -1353,13 +1353,20 @@ bool ZeroExtendsWord32ToWord64(Node* node) {
return false;
}
}
+ case IrOpcode::kInt32Constant:
+ case IrOpcode::kInt64Constant:
+ // Constants are loaded with movl or movq, or xorl for zero; see
+ // CodeGenerator::AssembleMove. So any non-negative constant that fits
+ // in a 32-bit signed integer is zero-extended to 64 bits.
+ if (g.CanBeImmediate(node)) {
+ return g.GetImmediateIntegerValue(node) >= 0;
+ }
+ return false;
default:
return false;
}
}
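The new kInt32Constant/kInt64Constant cases rely on AssembleMove materializing small constants with movl (or xorl for zero), which implicitly zeroes the upper half of the register, while negative values need a sign-extending load. A quick stand-alone illustration of why only non-negative immediates qualify (plain C++, not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t positive = 0x7FFFFFFF;
      int32_t negative = -1;
      // movl-style load of a non-negative constant: upper 32 bits become zero.
      uint64_t as_movl = static_cast<uint32_t>(positive);
      // movq of a negative constant sign-extends, so the upper half is all ones.
      uint64_t as_movq = static_cast<uint64_t>(static_cast<int64_t>(negative));
      std::printf("%016llx\n", static_cast<unsigned long long>(as_movl));  // 000000007fffffff
      std::printf("%016llx\n", static_cast<unsigned long long>(as_movq));  // ffffffffffffffff
    }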
-} // namespace
-
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -2271,7 +2278,12 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
value_operand, g.TempImmediate(-sw.min_value()));
} else {
// Zero extend, because we use it as 64-bit index into the jump table.
- Emit(kX64Movl, index_operand, value_operand);
+ if (ZeroExtendsWord32ToWord64(node->InputAt(0))) {
+ // Input value has already been zero-extended.
+ index_operand = value_operand;
+ } else {
+ Emit(kX64Movl, index_operand, value_operand);
+ }
}
// Generate a table lookup.
return EmitTableSwitch(sw, index_operand);
@@ -3257,7 +3269,7 @@ bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
} // namespace
-void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
@@ -3273,9 +3285,12 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
// Swizzles don't generally need DefineSameAsFirst to avoid a move.
bool no_same_as_first = is_swizzle;
// We generally need UseRegister for input0, Use for input1.
+  // TODO(v8:9198): We don't have 16-byte alignment for SIMD operands yet, so
+  // we retain this logic (the shuffle match clauses below keep setting these
+  // flags) but ignore it when selecting registers or slots.
bool src0_needs_reg = true;
bool src1_needs_reg = false;
- ArchOpcode opcode = kX64S8x16Shuffle; // general shuffle is the default
+ ArchOpcode opcode = kX64I8x16Shuffle; // general shuffle is the default
uint8_t offset;
uint8_t shuffle32x4[4];
@@ -3365,7 +3380,7 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
src0_needs_reg = true;
imms[imm_count++] = index;
}
- if (opcode == kX64S8x16Shuffle) {
+ if (opcode == kX64I8x16Shuffle) {
// Use same-as-first for general swizzle, but not shuffle.
no_same_as_first = !is_swizzle;
src0_needs_reg = !no_same_as_first;
@@ -3381,16 +3396,18 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Node* input0 = node->InputAt(0);
InstructionOperand dst =
no_same_as_first ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
- InstructionOperand src0 =
- src0_needs_reg ? g.UseUniqueRegister(input0) : g.UseUnique(input0);
+ // TODO(v8:9198): Use src0_needs_reg when we have memory alignment for SIMD.
+ InstructionOperand src0 = g.UseUniqueRegister(input0);
+ USE(src0_needs_reg);
int input_count = 0;
InstructionOperand inputs[2 + kMaxImms + kMaxTemps];
inputs[input_count++] = src0;
if (!is_swizzle) {
Node* input1 = node->InputAt(1);
- inputs[input_count++] =
- src1_needs_reg ? g.UseUniqueRegister(input1) : g.UseUnique(input1);
+ // TODO(v8:9198): Use src1_needs_reg when we have memory alignment for SIMD.
+ inputs[input_count++] = g.UseUniqueRegister(input1);
+ USE(src1_needs_reg);
}
for (int i = 0; i < imm_count; ++i) {
inputs[input_count++] = g.UseImmediate(imms[i]);
@@ -3398,10 +3415,10 @@ void InstructionSelector::VisitS8x16Shuffle(Node* node) {
Emit(opcode, 1, &dst, input_count, inputs, temp_count, temps);
}
-void InstructionSelector::VisitS8x16Swizzle(Node* node) {
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
- Emit(kX64S8x16Swizzle, g.DefineSameAsFirst(node),
+ Emit(kX64I8x16Swizzle, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseUniqueRegister(node->InputAt(1)),
arraysize(temps), temps);
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index ecdfe72d7d..7855bc4c44 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -39,7 +39,7 @@ class BytecodeGraphBuilder {
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
- BytecodeGraphBuilderFlags flags,
+ CodeKind code_kind, BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter);
// Creates a graph by visiting bytecodes.
@@ -63,8 +63,26 @@ class BytecodeGraphBuilder {
// Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
+ CodeKind code_kind() const { return code_kind_; }
bool native_context_independent() const {
- return native_context_independent_;
+ return CodeKindIsNativeContextIndependentJSFunction(code_kind_);
+ }
+ bool generate_full_feedback_collection() const {
+ // NCI code currently collects full feedback.
+ DCHECK_IMPLIES(native_context_independent(),
+ CollectFeedbackInGenericLowering());
+ return native_context_independent();
+ }
+
+ static JSTypeHintLowering::LoweringResult NoChange() {
+ return JSTypeHintLowering::LoweringResult::NoChange();
+ }
+ bool CanApplyTypeHintLowering(IrOpcode::Value opcode) const {
+ return !generate_full_feedback_collection() ||
+ !IrOpcode::IsFeedbackCollectingOpcode(opcode);
+ }
+ bool CanApplyTypeHintLowering(const Operator* op) const {
+ return CanApplyTypeHintLowering(static_cast<IrOpcode::Value>(op->opcode()));
}
// The node representing the current feedback vector is generated once prior
@@ -97,6 +115,12 @@ class BytecodeGraphBuilder {
Node* BuildLoadFeedbackCell(int index);
+ // Checks the optimization marker and potentially triggers compilation or
+ // installs the finished code object.
+ // Only relevant for specific code kinds (see
+ // CodeKindChecksOptimizationMarker).
+ void MaybeBuildTierUpCheck();
+
  // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
@@ -426,7 +450,7 @@ class BytecodeGraphBuilder {
int input_buffer_size_;
Node** input_buffer_;
- const bool native_context_independent_;
+ const CodeKind code_kind_;
Node* feedback_cell_node_;
Node* feedback_vector_node_;
Node* native_context_node_;
@@ -958,7 +982,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
SharedFunctionInfoRef const& shared_info,
FeedbackVectorRef const& feedback_vector, BailoutId osr_offset,
JSGraph* jsgraph, CallFrequency const& invocation_frequency,
- SourcePositionTable* source_positions, int inlining_id,
+ SourcePositionTable* source_positions, int inlining_id, CodeKind code_kind,
BytecodeGraphBuilderFlags flags, TickCounter* tick_counter)
: broker_(broker),
local_zone_(local_zone),
@@ -977,7 +1001,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_array().parameter_count(), bytecode_array().register_count(),
shared_info.object())),
source_position_iterator_(std::make_unique<SourcePositionTableIterator>(
- bytecode_array().source_positions())),
+ bytecode_array().SourcePositionTable())),
bytecode_iterator_(
std::make_unique<OffHeapBytecodeArray>(bytecode_array())),
bytecode_analysis_(broker_->GetBytecodeAnalysis(
@@ -997,8 +1021,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
- native_context_independent_(
- flags & BytecodeGraphBuilderFlag::kNativeContextIndependent),
+ code_kind_(code_kind),
feedback_cell_node_(nullptr),
feedback_vector_node_(nullptr),
native_context_node_(nullptr),
@@ -1120,6 +1143,19 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContext() {
return native_context;
}
+void BytecodeGraphBuilder::MaybeBuildTierUpCheck() {
+ if (!CodeKindChecksOptimizationMarker(code_kind())) return;
+
+ Environment* env = environment();
+ Node* control = env->GetControlDependency();
+ Node* effect = env->GetEffectDependency();
+
+ effect = graph()->NewNode(simplified()->TierUpCheck(), feedback_vector_node(),
+ effect, control);
+
+ env->UpdateEffectDependency(effect);
+}
+
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
Node* result = NewNode(javascript()->LoadContext(0, index, true));
NodeProperties::ReplaceContextInput(result, native_context_node());
@@ -1141,8 +1177,9 @@ void BytecodeGraphBuilder::CreateGraph() {
// Set up the basic structure of the graph. Outputs for {Start} are the formal
// parameters (including the receiver) plus new target, number of arguments,
// context and closure.
- int actual_parameter_count = bytecode_array().parameter_count() + 4;
- graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
+ int start_output_arity = StartNode::OutputArityForFormalParameterCount(
+ bytecode_array().parameter_count());
+ graph()->SetStart(graph()->NewNode(common()->Start(start_output_arity)));
Environment env(this, bytecode_array().register_count(),
bytecode_array().parameter_count(),
@@ -1152,7 +1189,9 @@ void BytecodeGraphBuilder::CreateGraph() {
CreateFeedbackCellNode();
CreateFeedbackVectorNode();
+ MaybeBuildTierUpCheck();
CreateNativeContextNode();
+
VisitBytecodes();
// Finish the basic structure of the graph.
@@ -1539,6 +1578,7 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
DCHECK(IsLoadGlobalICKind(broker()->GetFeedbackSlotKind(feedback)));
const Operator* op =
javascript()->LoadGlobal(name.object(), feedback, typeof_mode);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
return NewNode(op, feedback_vector_node());
}
@@ -1574,6 +1614,7 @@ void BytecodeGraphBuilder::VisitStaGlobal() {
GetLanguageModeFromSlotKind(broker()->GetFeedbackSlotKind(feedback));
const Operator* op =
javascript()->StoreGlobal(language_mode, name.object(), feedback);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, value, feedback_vector_node());
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1598,6 +1639,7 @@ void BytecodeGraphBuilder::VisitStaInArrayLiteral() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, array, index, value, feedback_vector_node());
}
@@ -1626,6 +1668,7 @@ void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, name, value, jsgraph()->Constant(flags),
feedback_vector_node());
}
@@ -1980,6 +2023,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -1992,10 +2036,25 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
NameRef name(broker(),
bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, object, feedback_vector_node());
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
+ PrepareEagerCheckpoint();
+ Node* receiver =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* home_object = environment()->LookupAccumulator();
+ NameRef name(broker(),
+ bytecode_iterator().GetConstantForIndexOperand(1, isolate()));
+ const Operator* op = javascript()->LoadNamedFromSuper(name.object());
+ // TODO(marja, v8:9237): Use lowering.
+
+ Node* node = NewNode(op, receiver, home_object);
+ environment()->BindAccumulator(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
PrepareEagerCheckpoint();
Node* key = environment()->LookupAccumulator();
@@ -2017,6 +2076,7 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
STATIC_ASSERT(JSLoadPropertyNode::ObjectIndex() == 0);
STATIC_ASSERT(JSLoadPropertyNode::KeyIndex() == 1);
STATIC_ASSERT(JSLoadPropertyNode::FeedbackVectorIndex() == 2);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, key, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -2054,6 +2114,7 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, value, feedback_vector_node());
}
environment()->RecordAfterState(node, Environment::kAttachFrameState);
@@ -2074,6 +2135,7 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
javascript()->StoreNamed(language_mode, name.object(), FeedbackSource());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, object, value, feedback_vector_node());
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -2108,6 +2170,7 @@ void BytecodeGraphBuilder::VisitStaKeyedProperty() {
STATIC_ASSERT(JSStorePropertyNode::KeyIndex() == 1);
STATIC_ASSERT(JSStorePropertyNode::ValueIndex() == 2);
STATIC_ASSERT(JSStorePropertyNode::FeedbackVectorIndex() == 3);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, object, key, value, feedback_vector_node());
}
@@ -2243,9 +2306,10 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
FeedbackSource pair = CreateFeedbackSource(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
STATIC_ASSERT(JSCreateLiteralRegExpNode::FeedbackVectorIndex() == 0);
- Node* literal = NewNode(javascript()->CreateLiteralRegExp(
- constant_pattern.object(), pair, literal_flags),
- feedback_vector_node());
+ const Operator* op = javascript()->CreateLiteralRegExp(
+ constant_pattern.object(), pair, literal_flags);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
@@ -2265,18 +2329,20 @@ void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
int number_of_elements =
array_boilerplate_description.constants_elements_length();
STATIC_ASSERT(JSCreateLiteralArrayNode::FeedbackVectorIndex() == 0);
- Node* literal = NewNode(
+ const Operator* op =
javascript()->CreateLiteralArray(array_boilerplate_description.object(),
- pair, literal_flags, number_of_elements),
- feedback_vector_node());
+ pair, literal_flags, number_of_elements);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitCreateEmptyArrayLiteral() {
int const slot_id = bytecode_iterator().GetIndexOperand(0);
FeedbackSource pair = CreateFeedbackSource(slot_id);
- Node* literal = NewNode(javascript()->CreateEmptyLiteralArray(pair),
- feedback_vector_node());
+ const Operator* op = javascript()->CreateEmptyLiteralArray(pair);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal);
}
@@ -2296,10 +2362,10 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
int number_of_properties = constant_properties.size();
STATIC_ASSERT(JSCreateLiteralObjectNode::FeedbackVectorIndex() == 0);
- Node* literal = NewNode(
- javascript()->CreateLiteralObject(constant_properties.object(), pair,
- literal_flags, number_of_properties),
- feedback_vector_node());
+ const Operator* op = javascript()->CreateLiteralObject(
+ constant_properties.object(), pair, literal_flags, number_of_properties);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* literal = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(literal, Environment::kAttachFrameState);
}
@@ -2318,6 +2384,7 @@ void BytecodeGraphBuilder::VisitCloneObject() {
javascript()->CloneObject(CreateFeedbackSource(slot), flags);
STATIC_ASSERT(JSCloneObjectNode::SourceIndex() == 0);
STATIC_ASSERT(JSCloneObjectNode::FeedbackVectorIndex() == 1);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* value = NewNode(op, source, feedback_vector_node());
environment()->BindAccumulator(value, Environment::kAttachFrameState);
}
@@ -2329,10 +2396,10 @@ void BytecodeGraphBuilder::VisitGetTemplateObject() {
TemplateObjectDescriptionRef description(
broker(), bytecode_iterator().GetConstantForIndexOperand(0, isolate()));
STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
- Node* template_object =
- NewNode(javascript()->GetTemplateObject(description.object(),
- shared_info().object(), source),
- feedback_vector_node());
+ const Operator* op = javascript()->GetTemplateObject(
+ description.object(), shared_info().object(), source);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* template_object = NewNode(op, feedback_vector_node());
environment()->BindAccumulator(template_object);
}
@@ -2378,6 +2445,7 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
const Operator* op =
javascript()->Call(arg_count, frequency, feedback, receiver_mode,
speculation_mode, CallFeedbackRelation::kRelated);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
op, args, static_cast<int>(arg_count), feedback.slot);
@@ -2563,6 +2631,7 @@ void BytecodeGraphBuilder::VisitCallWithSpread() {
const Operator* op = javascript()->CallWithSpread(
JSCallWithSpreadNode::ArityForArgc(arg_count), frequency, feedback,
speculation_mode);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
op, args, static_cast<int>(arg_count), feedback.slot);
@@ -2687,6 +2756,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
const uint32_t arg_count = static_cast<uint32_t>(reg_count);
const uint32_t arity = JSConstructNode::ArityForArgc(arg_count);
const Operator* op = javascript()->Construct(arity, frequency, feedback);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2719,6 +2789,7 @@ void BytecodeGraphBuilder::VisitConstructWithSpread() {
const uint32_t arity = JSConstructNode::ArityForArgc(arg_count);
const Operator* op =
javascript()->ConstructWithSpread(arity, frequency, feedback);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* const* args = GetConstructArgumentsFromRegister(callee, new_target,
first_reg, arg_count);
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedConstruct(
@@ -2848,6 +2919,7 @@ void BytecodeGraphBuilder::BuildUnaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, operand, feedback_vector_node());
}
@@ -2872,6 +2944,7 @@ void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, left, right, feedback_vector_node());
}
@@ -3029,6 +3102,7 @@ void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -3167,6 +3241,7 @@ void BytecodeGraphBuilder::BuildCompareOp(const Operator* op) {
node = lowering.value();
} else {
DCHECK(!lowering.Changed());
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
node = NewNode(op, left, right, feedback_vector_node());
}
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -3226,8 +3301,9 @@ void BytecodeGraphBuilder::VisitTestIn() {
STATIC_ASSERT(JSHasPropertyNode::ObjectIndex() == 0);
STATIC_ASSERT(JSHasPropertyNode::KeyIndex() == 1);
STATIC_ASSERT(JSHasPropertyNode::FeedbackVectorIndex() == 2);
- Node* node = NewNode(javascript()->HasProperty(feedback), object, key,
- feedback_vector_node());
+ const Operator* op = javascript()->HasProperty(feedback);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
+ Node* node = NewNode(op, object, key, feedback_vector_node());
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -3608,6 +3684,7 @@ void BytecodeGraphBuilder::VisitGetIterator() {
DCHECK(!lowering.Changed());
STATIC_ASSERT(JSGetIteratorNode::ReceiverIndex() == 0);
STATIC_ASSERT(JSGetIteratorNode::FeedbackVectorIndex() == 1);
+ DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* iterator = NewNode(op, receiver, feedback_vector_node());
environment()->BindAccumulator(iterator, Environment::kAttachFrameState);
}
@@ -4007,6 +4084,7 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedUnaryOp(const Operator* op,
Node* operand,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4020,6 +4098,7 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op, Node* left,
Node* right,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4034,6 +4113,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
Node* cache_array,
Node* cache_type, Node* index,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(IrOpcode::kJSForInNext)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4046,6 +4126,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInNext(Node* receiver,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(IrOpcode::kJSForInPrepare)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4058,6 +4139,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedForInPrepare(Node* enumerator,
JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(IrOpcode::kJSToNumber)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4069,6 +4151,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedToNumber(Node* value,
JSTypeHintLowering::LoweringResult BytecodeGraphBuilder::TryBuildSimplifiedCall(
const Operator* op, Node* const* args, int arg_count, FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4083,6 +4166,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedConstruct(const Operator* op,
Node* const* args,
int arg_count,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4097,6 +4181,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedGetIterator(const Operator* op,
Node* receiver,
FeedbackSlot load_slot,
FeedbackSlot call_slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
@@ -4110,6 +4195,7 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
Node* receiver,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult early_reduction =
@@ -4123,6 +4209,7 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
Node* receiver, Node* key,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4136,6 +4223,7 @@ JSTypeHintLowering::LoweringResult
BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
Node* receiver, Node* value,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4150,6 +4238,7 @@ BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
Node* receiver, Node* key,
Node* value,
FeedbackSlot slot) {
+ if (!CanApplyTypeHintLowering(op)) return NoChange();
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
JSTypeHintLowering::LoweringResult result =
@@ -4387,13 +4476,14 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- int inlining_id, BytecodeGraphBuilderFlags flags,
+ int inlining_id, CodeKind code_kind,
+ BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter) {
DCHECK(broker->IsSerializedForCompilation(shared_info, feedback_vector));
BytecodeGraphBuilder builder(
broker, local_zone, broker->target_native_context(), shared_info,
feedback_vector, osr_offset, jsgraph, invocation_frequency,
- source_positions, inlining_id, flags, tick_counter);
+ source_positions, inlining_id, code_kind, flags, tick_counter);
builder.CreateGraph();
}
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 1667a4d57d..a8423904f8 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -7,8 +7,9 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/js-type-hint-lowering.h"
-#include "src/utils/utils.h"
#include "src/handles/handles.h"
+#include "src/objects/code-kind.h"
+#include "src/utils/utils.h"
namespace v8 {
@@ -33,7 +34,6 @@ enum class BytecodeGraphBuilderFlag : uint8_t {
// bytecode analysis.
kAnalyzeEnvironmentLiveness = 1 << 1,
kBailoutOnUninitialized = 1 << 2,
- kNativeContextIndependent = 1 << 3,
};
using BytecodeGraphBuilderFlags = base::Flags<BytecodeGraphBuilderFlag>;
@@ -45,7 +45,8 @@ void BuildGraphFromBytecode(JSHeapBroker* broker, Zone* local_zone,
BailoutId osr_offset, JSGraph* jsgraph,
CallFrequency const& invocation_frequency,
SourcePositionTable* source_positions,
- int inlining_id, BytecodeGraphBuilderFlags flags,
+ int inlining_id, CodeKind code_kind,
+ BytecodeGraphBuilderFlags flags,
TickCounter* tick_counter);
} // namespace compiler
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index fdf0a66a22..af467f2bb1 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -31,6 +31,7 @@ namespace {
// == x64 windows ============================================================
#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS rcx, rdx, r8, r9
+#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() | r14.bit() | \
r15.bit()
@@ -42,6 +43,7 @@ namespace {
#else // V8_TARGET_OS_WIN
// == x64 other ==============================================================
#define PARAM_REGISTERS rdi, rsi, rdx, rcx, r8, r9
+#define FP_PARAM_REGISTERS xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7
#define CALLEE_SAVE_REGISTERS \
rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit()
#endif // V8_TARGET_OS_WIN
@@ -137,38 +139,116 @@ namespace {
#endif
} // namespace
+#ifdef V8_TARGET_OS_WIN
+// As defined in
+// https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention?view=vs-2019#parameter-passing,
+// the Windows calling convention doesn't differentiate between GP and FP
+// params when counting how many of them should be placed in registers. That's
+// why we use the same counter {i} for both types here.
+void BuildParameterLocations(const MachineSignature* msig,
+ size_t kFPParamRegisterCount,
+ size_t kParamRegisterCount,
+ const DoubleRegister* kFPParamRegisters,
+ const v8::internal::Register* kParamRegisters,
+ LocationSignature::Builder* out_locations) {
+#ifdef STACK_SHADOW_WORDS
+ int stack_offset = STACK_SHADOW_WORDS;
+#else
+ int stack_offset = 0;
+#endif
+ CHECK_EQ(kFPParamRegisterCount, kParamRegisterCount);
+
+ for (size_t i = 0; i < msig->parameter_count(); i++) {
+ MachineType type = msig->GetParam(i);
+ bool spill = (i >= kParamRegisterCount);
+ if (spill) {
+ out_locations->AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - stack_offset, type));
+ stack_offset++;
+ } else {
+ if (IsFloatingPoint(type.representation())) {
+ out_locations->AddParam(
+ LinkageLocation::ForRegister(kFPParamRegisters[i].code(), type));
+ } else {
+ out_locations->AddParam(
+ LinkageLocation::ForRegister(kParamRegisters[i].code(), type));
+ }
+ }
+ }
+}
+#else // V8_TARGET_OS_WIN
+// As defined in https://www.agner.org/optimize/calling_conventions.pdf,
+// Section 7, Linux and Mac place parameters in consecutive registers,
+// differentiating between GP and FP params. That's why we maintain two
+// separate counters here.
+void BuildParameterLocations(const MachineSignature* msig,
+ size_t kFPParamRegisterCount,
+ size_t kParamRegisterCount,
+ const DoubleRegister* kFPParamRegisters,
+ const v8::internal::Register* kParamRegisters,
+ LocationSignature::Builder* out_locations) {
+#ifdef STACK_SHADOW_WORDS
+ int stack_offset = STACK_SHADOW_WORDS;
+#else
+ int stack_offset = 0;
+#endif
+ size_t num_params = 0;
+ size_t num_fp_params = 0;
+ for (size_t i = 0; i < msig->parameter_count(); i++) {
+ MachineType type = msig->GetParam(i);
+ bool spill = IsFloatingPoint(type.representation())
+ ? (num_fp_params >= kFPParamRegisterCount)
+ : (num_params >= kParamRegisterCount);
+ if (spill) {
+ out_locations->AddParam(
+ LinkageLocation::ForCallerFrameSlot(-1 - stack_offset, type));
+ stack_offset++;
+ } else {
+ if (IsFloatingPoint(type.representation())) {
+ out_locations->AddParam(LinkageLocation::ForRegister(
+ kFPParamRegisters[num_fp_params].code(), type));
+ ++num_fp_params;
+ } else {
+ out_locations->AddParam(LinkageLocation::ForRegister(
+ kParamRegisters[num_params].code(), type));
+ ++num_params;
+ }
+ }
+ }
+}
+#endif // V8_TARGET_OS_WIN
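To make the difference between the two BuildParameterLocations variants concrete, the sketch below walks a mixed signature through both counting schemes. The register tables mirror the PARAM_REGISTERS / FP_PARAM_REGISTERS lists for x64; everything else (names, printing) is illustrative rather than V8 API:

    #include <cstdio>

    enum Kind { kGP, kFP };

    void AssignWin64(const Kind* params, int n) {
      const char* gp[] = {"rcx", "rdx", "r8", "r9"};
      const char* fp[] = {"xmm0", "xmm1", "xmm2", "xmm3"};
      for (int i = 0; i < n; i++) {  // One shared slot counter {i}.
        if (i >= 4) { std::printf("arg%d -> stack\n", i); continue; }
        std::printf("arg%d -> %s\n", i, params[i] == kFP ? fp[i] : gp[i]);
      }
    }

    void AssignSysV(const Kind* params, int n) {
      const char* gp[] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};
      const char* fp[] = {"xmm0", "xmm1", "xmm2", "xmm3",
                          "xmm4", "xmm5", "xmm6", "xmm7"};
      int num_gp = 0, num_fp = 0;  // Independent GP and FP counters.
      for (int i = 0; i < n; i++) {
        if (params[i] == kFP) {
          if (num_fp < 8) std::printf("arg%d -> %s\n", i, fp[num_fp++]);
          else std::printf("arg%d -> stack\n", i);
        } else {
          if (num_gp < 6) std::printf("arg%d -> %s\n", i, gp[num_gp++]);
          else std::printf("arg%d -> stack\n", i);
        }
      }
    }

    int main() {
      Kind sig[] = {kGP, kFP, kGP, kFP};
      std::puts("Windows x64:"); AssignWin64(sig, 4);  // rcx, xmm1, r8, xmm3
      std::puts("System V:");    AssignSysV(sig, 4);   // rdi, xmm0, rsi, xmm1
    }

For (int, double, int, double) this yields rcx/xmm1/r8/xmm3 on Windows but rdi/xmm0/rsi/xmm1 under System V, which is exactly the distinction the two comments above describe.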
// General code uses the above configuration data.
CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
const MachineSignature* msig,
CallDescriptor::Flags flags) {
+#ifdef UNSUPPORTED_C_LINKAGE
+ // This method should not be called on unknown architectures.
+ FATAL("requested C call descriptor on unsupported architecture");
+ return nullptr;
+#endif
+
DCHECK_LE(msig->parameter_count(), static_cast<size_t>(kMaxCParameters));
LocationSignature::Builder locations(zone, msig->return_count(),
msig->parameter_count());
+
+#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
// Check the types of the signature.
- // Currently no floating point parameters or returns are allowed because
- // on ia32, the FP top of stack is involved.
- for (size_t i = 0; i < msig->return_count(); i++) {
- MachineRepresentation rep = msig->GetReturn(i).representation();
- CHECK_NE(MachineRepresentation::kFloat32, rep);
- CHECK_NE(MachineRepresentation::kFloat64, rep);
- }
for (size_t i = 0; i < msig->parameter_count(); i++) {
MachineRepresentation rep = msig->GetParam(i).representation();
CHECK_NE(MachineRepresentation::kFloat32, rep);
CHECK_NE(MachineRepresentation::kFloat64, rep);
}
-
-#ifdef UNSUPPORTED_C_LINKAGE
- // This method should not be called on unknown architectures.
- FATAL("requested C call descriptor on unsupported architecture");
- return nullptr;
#endif
- // Add return location(s).
- CHECK_GE(2, locations.return_count_);
+ // Add return location(s). We don't support FP returns for now.
+ for (size_t i = 0; i < locations.return_count_; i++) {
+ MachineType type = msig->GetReturn(i);
+ CHECK(!IsFloatingPoint(type.representation()));
+ }
+ CHECK_GE(2, locations.return_count_);
if (locations.return_count_ > 0) {
locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
msig->GetReturn(0)));
@@ -178,8 +258,6 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
msig->GetReturn(1)));
}
- const int parameter_count = static_cast<int>(msig->parameter_count());
-
#ifdef PARAM_REGISTERS
const v8::internal::Register kParamRegisters[] = {PARAM_REGISTERS};
const int kParamRegisterCount = static_cast<int>(arraysize(kParamRegisters));
@@ -188,22 +266,17 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
const int kParamRegisterCount = 0;
#endif
-#ifdef STACK_SHADOW_WORDS
- int stack_offset = STACK_SHADOW_WORDS;
+#ifdef FP_PARAM_REGISTERS
+ const DoubleRegister kFPParamRegisters[] = {FP_PARAM_REGISTERS};
+ const size_t kFPParamRegisterCount = arraysize(kFPParamRegisters);
#else
- int stack_offset = 0;
+ const DoubleRegister* kFPParamRegisters = nullptr;
+ const size_t kFPParamRegisterCount = 0;
#endif
+
// Add register and/or stack parameter(s).
- for (int i = 0; i < parameter_count; i++) {
- if (i < kParamRegisterCount) {
- locations.AddParam(LinkageLocation::ForRegister(kParamRegisters[i].code(),
- msig->GetParam(i)));
- } else {
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- -1 - stack_offset, msig->GetParam(i)));
- stack_offset++;
- }
- }
+ BuildParameterLocations(msig, kFPParamRegisterCount, kParamRegisterCount,
+ kFPParamRegisters, kParamRegisters, &locations);
#ifdef CALLEE_SAVE_REGISTERS
const RegList kCalleeSaveRegisters = CALLEE_SAVE_REGISTERS;
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 5f5ff66cfe..273058ba25 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -64,11 +64,8 @@ CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int32_t builtin_index)
: CodeAssemblerState(
isolate, zone,
- Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- (kind == CodeKind::BUILTIN ? CallDescriptor::kPushArgumentCount
- : CallDescriptor::kNoFlags) |
- CallDescriptor::kCanUseRoots),
+ Linkage::GetJSCallDescriptor(zone, false, parameter_count,
+ CallDescriptor::kCanUseRoots),
kind, name, poisoning_level, builtin_index) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 22547aa45c..f68780394a 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -12,6 +12,8 @@
#include "src/common/globals.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/frame-states.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
@@ -565,6 +567,77 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
DISALLOW_COPY_AND_ASSIGN(CommonOperatorBuilder);
};
+// Node wrappers.
+
+class CommonNodeWrapperBase : public NodeWrapper {
+ public:
+ explicit constexpr CommonNodeWrapperBase(Node* node) : NodeWrapper(node) {}
+
+ // Valid iff this node has exactly one effect input.
+ Effect effect() const {
+ DCHECK_EQ(node()->op()->EffectInputCount(), 1);
+ return Effect{NodeProperties::GetEffectInput(node())};
+ }
+
+ // Valid iff this node has exactly one control input.
+ Control control() const {
+ DCHECK_EQ(node()->op()->ControlInputCount(), 1);
+ return Control{NodeProperties::GetControlInput(node())};
+ }
+};
+
+#define DEFINE_INPUT_ACCESSORS(Name, name, TheIndex, Type) \
+ static constexpr int Name##Index() { return TheIndex; } \
+ TNode<Type> name() const { \
+ return TNode<Type>::UncheckedCast( \
+ NodeProperties::GetValueInput(node(), TheIndex)); \
+ }
+
+class StartNode final : public CommonNodeWrapperBase {
+ public:
+ explicit constexpr StartNode(Node* node) : CommonNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kStart);
+ }
+
+ // The receiver is counted as part of formal parameters.
+ static constexpr int kReceiverOutputCount = 1;
+ // These outputs are in addition to formal parameters.
+ static constexpr int kExtraOutputCount = 4;
+
+ // Takes the formal parameter count of the current function (including
+ // receiver) and returns the number of value outputs of the start node.
+ static constexpr int OutputArityForFormalParameterCount(int argc) {
+ constexpr int kClosure = 1;
+ constexpr int kNewTarget = 1;
+ constexpr int kArgCount = 1;
+ constexpr int kContext = 1;
+ STATIC_ASSERT(kClosure + kNewTarget + kArgCount + kContext ==
+ kExtraOutputCount);
+ // Checking related linkage methods here since they rely on Start node
+ // layout.
+ CONSTEXPR_DCHECK(Linkage::kJSCallClosureParamIndex == -1);
+ CONSTEXPR_DCHECK(Linkage::GetJSCallNewTargetParamIndex(argc) == argc + 0);
+ CONSTEXPR_DCHECK(Linkage::GetJSCallArgCountParamIndex(argc) == argc + 1);
+ CONSTEXPR_DCHECK(Linkage::GetJSCallContextParamIndex(argc) == argc + 2);
+ return argc + kClosure + kNewTarget + kArgCount + kContext;
+ }
+
+ int FormalParameterCount() const {
+ DCHECK_GE(node()->op()->ValueOutputCount(),
+ kExtraOutputCount + kReceiverOutputCount);
+ return node()->op()->ValueOutputCount() - kExtraOutputCount;
+ }
+
+ int FormalParameterCountWithoutReceiver() const {
+ DCHECK_GE(node()->op()->ValueOutputCount(),
+ kExtraOutputCount + kReceiverOutputCount);
+ return node()->op()->ValueOutputCount() - kExtraOutputCount -
+ kReceiverOutputCount;
+ }
+};
+
+#undef DEFINE_INPUT_ACCESSORS
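StartNode::OutputArityForFormalParameterCount encodes the Start node's value-output layout: the formal parameters (receiver included) followed by the closure, new.target, argument count and context outputs. A minimal stand-alone model of that arithmetic, assuming the same ordering (the constant is copied from the class above, the rest is illustrative):

    #include <cstdio>

    constexpr int kExtraOutputCount = 4;  // closure, new.target, argc, context

    constexpr int OutputArity(int formal_parameter_count) {
      return formal_parameter_count + kExtraOutputCount;
    }
    // Per the CONSTEXPR_DCHECKs above, the extra value outputs follow the
    // formal parameters; the closure is addressed separately via index -1.
    constexpr int NewTargetIndex(int argc) { return argc + 0; }
    constexpr int ArgCountIndex(int argc) { return argc + 1; }
    constexpr int ContextIndex(int argc) { return argc + 2; }

    int main() {
      constexpr int argc = 3;  // two declared parameters plus the receiver
      static_assert(OutputArity(argc) == 7, "3 params + 4 extra outputs");
      std::printf("new.target=%d argc=%d context=%d\n", NewTargetIndex(argc),
                  ArgCountIndex(argc), ContextIndex(argc));  // 3 4 5
    }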
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index 33e1930499..263a5a5f1e 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -8,6 +8,7 @@
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
+#include "src/objects/js-function-inl.h"
#include "src/objects/objects-inl.h"
#include "src/zone/zone-handle-set.h"
@@ -179,6 +180,13 @@ class FieldRepresentationDependency final : public CompilationDependency {
DependentCode::kFieldRepresentationGroup);
}
+#ifdef DEBUG
+ bool IsFieldRepresentationDependencyOnMap(
+ Handle<Map> const& receiver_map) const override {
+ return owner_.object().equals(receiver_map);
+ }
+#endif
+
private:
MapRef owner_;
InternalIndex descriptor_;
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
index e5726a0ddb..751e8d9f35 100644
--- a/deps/v8/src/compiler/compilation-dependency.h
+++ b/deps/v8/src/compiler/compilation-dependency.h
@@ -22,6 +22,10 @@ class CompilationDependency : public ZoneObject {
#ifdef DEBUG
virtual bool IsPretenureModeDependency() const { return false; }
+ virtual bool IsFieldRepresentationDependencyOnMap(
+ Handle<Map> const& receiver_map) const {
+ return false;
+ }
#endif
};
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 14dd26ba7f..98ca00c78b 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -45,7 +45,7 @@ class EffectControlLinearizer {
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
- graph_assembler_(js_graph, temp_zone,
+ graph_assembler_(js_graph, temp_zone, base::nullopt,
should_maintain_schedule() ? schedule : nullptr),
frame_state_zapper_(nullptr) {}
@@ -179,7 +179,8 @@ class EffectControlLinearizer {
void LowerCheckEqualsInternalizedString(Node* node, Node* frame_state);
void LowerCheckEqualsSymbol(Node* node, Node* frame_state);
Node* LowerTypeOf(Node* node);
- Node* LowerUpdateInterruptBudget(Node* node);
+ void LowerTierUpCheck(Node* node);
+ void LowerUpdateInterruptBudget(Node* node);
Node* LowerToBoolean(Node* node);
Node* LowerPlainPrimitiveToNumber(Node* node);
Node* LowerPlainPrimitiveToWord32(Node* node);
@@ -265,6 +266,7 @@ class EffectControlLinearizer {
Node* ObjectIsSmi(Node* value);
Node* LoadFromSeqString(Node* receiver, Node* position, Node* is_one_byte);
Node* TruncateWordToInt32(Node* value);
+ Node* MakeWeakForComparison(Node* heap_object);
Node* BuildIsWeakReferenceTo(Node* maybe_object, Node* value);
Node* BuildIsClearedWeakReference(Node* maybe_object);
Node* BuildIsStrongReference(Node* value);
@@ -282,8 +284,9 @@ class EffectControlLinearizer {
DeoptimizeReason reason);
// Helper functions used in LowerDynamicCheckMaps
- void CheckPolymorphic(Node* feedback, Node* value_map, Node* handler,
- GraphAssemblerLabel<0>* done, Node* frame_state);
+ void CheckPolymorphic(Node* expected_polymorphic_array, Node* actual_map,
+ Node* actual_handler, GraphAssemblerLabel<0>* done,
+ Node* frame_state);
void ProcessMonomorphic(Node* handler, GraphAssemblerLabel<0>* done,
Node* frame_state, int slot, Node* vector);
void BranchOnICState(int slot_index, Node* vector, Node* value_map,
@@ -1138,8 +1141,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTypeOf:
result = LowerTypeOf(node);
break;
+ case IrOpcode::kTierUpCheck:
+ LowerTierUpCheck(node);
+ break;
case IrOpcode::kUpdateInterruptBudget:
- result = LowerUpdateInterruptBudget(node);
+ LowerUpdateInterruptBudget(node);
break;
case IrOpcode::kNewDoubleElements:
result = LowerNewDoubleElements(node);
@@ -1881,51 +1887,107 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
}
}
-void EffectControlLinearizer::CheckPolymorphic(Node* feedback_slot,
- Node* value_map, Node* handler,
+void EffectControlLinearizer::CheckPolymorphic(Node* expected_polymorphic_array,
+ Node* actual_map,
+ Node* actual_handler,
GraphAssemblerLabel<0>* done,
Node* frame_state) {
- Node* feedback_slot_map =
- __ LoadField(AccessBuilder::ForMap(), feedback_slot);
- Node* is_weak_fixed_array_check =
- __ TaggedEqual(feedback_slot_map, __ WeakFixedArrayMapConstant());
+ Node* expected_polymorphic_array_map =
+ __ LoadField(AccessBuilder::ForMap(), expected_polymorphic_array);
+ Node* is_weak_fixed_array = __ TaggedEqual(expected_polymorphic_array_map,
+ __ WeakFixedArrayMapConstant());
__ DeoptimizeIfNot(DeoptimizeReason::kTransitionedToMegamorphicIC,
- FeedbackSource(), is_weak_fixed_array_check, frame_state,
+ FeedbackSource(), is_weak_fixed_array, frame_state,
IsSafetyCheck::kCriticalSafetyCheck);
- Node* length = ChangeSmiToInt32(
- __ LoadField(AccessBuilder::ForWeakFixedArrayLength(), feedback_slot));
- auto loop = __ MakeLoopLabel(MachineRepresentation::kWord32);
- __ Goto(&loop, __ Int32Constant(0));
- __ Bind(&loop);
- {
- Node* index = loop.PhiAt(0);
- Node* check = __ Int32LessThan(index, length);
- __ DeoptimizeIfNot(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
- FeedbackSource(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ Node* polymorphic_array = expected_polymorphic_array;
- Node* maybe_map = __ LoadElement(AccessBuilder::ForWeakFixedArrayElement(),
- feedback_slot, index);
- auto continue_loop = __ MakeLabel();
-
- __ GotoIfNot(BuildIsWeakReferenceTo(maybe_map, value_map), &continue_loop);
- constexpr int kHandlerOffsetInEntry = 1;
- Node* maybe_handler = __ LoadElement(
- AccessBuilder::ForWeakFixedArrayElement(), feedback_slot,
- __ Int32Add(index, __ Int32Constant(kHandlerOffsetInEntry)));
- Node* handler_check = __ TaggedEqual(maybe_handler, handler);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
- handler_check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+  // This is now a weak pointer held in a register, so we need to be
+  // careful about spilling and reloading it (it could get cleared in
+  // between). There is no runtime call here that could cause a spill,
+  // so we should be safe.
+ Node* weak_actual_map = MakeWeakForComparison(actual_map);
+ Node* length = ChangeSmiToInt32(__ LoadField(
+ AccessBuilder::ForWeakFixedArrayLength(), polymorphic_array));
+ auto do_handler_check = __ MakeLabel(MachineRepresentation::kWord32);
- __ Goto(done);
+ GraphAssemblerLabel<0> labels[] = {__ MakeLabel(), __ MakeLabel(),
+ __ MakeLabel(), __ MakeLabel()};
- __ Bind(&continue_loop);
- constexpr int kEntrySize = 2;
- index = __ Int32Add(index, __ Int32Constant(kEntrySize));
- __ Goto(&loop, index);
+ STATIC_ASSERT(FLAG_max_minimorphic_map_checks == arraysize(labels));
+ DCHECK_GE(FLAG_max_minimorphic_map_checks,
+ FLAG_max_valid_polymorphic_map_count);
+
+ // The following generates a switch based on the length of the
+ // array:
+ //
+ // if length >= 4: goto labels[3]
+ // if length == 3: goto labels[2]
+ // if length == 2: goto labels[1]
+ // if length == 1: goto labels[0]
+ __ GotoIf(__ Int32LessThanOrEqual(
+ __ Int32Constant(FeedbackIterator::SizeFor(4)), length),
+ &labels[3]);
+ __ GotoIf(
+ __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(3))),
+ &labels[2]);
+ __ GotoIf(
+ __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(2))),
+ &labels[1]);
+ __ GotoIf(
+ __ Word32Equal(length, __ Int32Constant(FeedbackIterator::SizeFor(1))),
+ &labels[0]);
+
+  // We should never have a polymorphic feedback array of size 0.
+ __ Unreachable(done);
+
+ // This loop generates code like this to do the dynamic map check:
+ //
+ // labels[3]:
+ // maybe_map = load(polymorphic_array, i)
+ // if weak_actual_map == maybe_map goto handler_check
+ // goto labels[2]
+ // labels[2]:
+ // maybe_map = load(polymorphic_array, i - 1)
+ // if weak_actual_map == maybe_map goto handler_check
+ // goto labels[1]
+ // labels[1]:
+ // maybe_map = load(polymorphic_array, i - 2)
+ // if weak_actual_map == maybe_map goto handler_check
+ // goto labels[0]
+ // labels[0]:
+ // maybe_map = load(polymorphic_array, i - 3)
+ // if weak_actual_map == maybe_map goto handler_check
+ // bailout
+ for (int i = arraysize(labels) - 1; i >= 0; i--) {
+ __ Bind(&labels[i]);
+ Node* maybe_map = __ LoadField(AccessBuilder::ForWeakFixedArraySlot(
+ FeedbackIterator::MapIndexForEntry(i)),
+ polymorphic_array);
+ Node* map_check = __ TaggedEqual(maybe_map, weak_actual_map);
+
+ int handler_index = FeedbackIterator::HandlerIndexForEntry(i);
+ __ GotoIf(map_check, &do_handler_check, __ Int32Constant(handler_index));
+ if (i > 0) {
+ __ Goto(&labels[i - 1]);
+ } else {
+ // TODO(turbofan): Add support for gasm->Deoptimize.
+ __ DeoptimizeIf(DeoptimizeKind::kBailout, DeoptimizeReason::kMissingMap,
+ FeedbackSource(), __ IntPtrConstant(1),
+ FrameState(frame_state));
+ __ Unreachable(done);
+ }
}
+
+ __ Bind(&do_handler_check);
+ Node* handler_index = do_handler_check.PhiAt(0);
+ Node* maybe_handler =
+ __ LoadElement(AccessBuilder::ForWeakFixedArrayElement(),
+ polymorphic_array, handler_index);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongHandler, FeedbackSource(),
+ __ TaggedEqual(maybe_handler, actual_handler), frame_state,
+ IsSafetyCheck::kCriticalSafetyCheck);
+ __ Goto(done);
}
void EffectControlLinearizer::ProcessMonomorphic(Node* handler,
@@ -2599,8 +2661,7 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
__ Branch(check, &done, &if_abort);
__ Bind(&if_abort);
- __ Unreachable();
- __ Goto(&done);
+ __ Unreachable(&done);
__ Bind(&done);
}
@@ -2646,8 +2707,7 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
__ Branch(check, &done, &if_abort);
__ Bind(&if_abort);
- __ Unreachable();
- __ Goto(&done);
+ __ Unreachable(&done);
__ Bind(&done);
}
@@ -3678,12 +3738,89 @@ Node* EffectControlLinearizer::LowerTypeOf(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerUpdateInterruptBudget(Node* node) {
+void EffectControlLinearizer::LowerTierUpCheck(Node* node) {
+ TierUpCheckNode n(node);
+ TNode<FeedbackVector> vector = n.feedback_vector();
+
+ Node* optimization_marker = __ LoadField(
+ AccessBuilder::ForFeedbackVectorOptimizedCodeWeakOrSmi(), vector);
+
+ // TODO(jgruber): The branch introduces a sequence of spills before it
+ // (and restores at `fallthrough`) that are completely unnecessary since
+ // the IfFalse continuation ends in a tail call. Investigate how to avoid
+ // these spills.
+
+ // TODO(jgruber): Combine the checks below for none/queued, e.g. by
+ // reorganizing OptimizationMarker values such that the least significant bit
+ // says whether the value is interesting or not. Also update the related
+ // check in the InterpreterEntryTrampoline.
+
+ auto fallthrough = __ MakeLabel();
+ auto optimization_marker_is_not_none = __ MakeDeferredLabel();
+ auto optimization_marker_is_neither_none_nor_queued = __ MakeDeferredLabel();
+ __ BranchWithHint(
+ __ TaggedEqual(optimization_marker, __ SmiConstant(static_cast<int>(
+ OptimizationMarker::kNone))),
+ &fallthrough, &optimization_marker_is_not_none, BranchHint::kTrue);
+
+ __ Bind(&optimization_marker_is_not_none);
+ __ BranchWithHint(
+ __ TaggedEqual(optimization_marker,
+ __ SmiConstant(static_cast<int>(
+ OptimizationMarker::kInOptimizationQueue))),
+ &fallthrough, &optimization_marker_is_neither_none_nor_queued,
+ BranchHint::kNone);
+
+ __ Bind(&optimization_marker_is_neither_none_nor_queued);
+
+ // The optimization marker field contains a non-trivial value, and some
+ // action has to be taken. For example, perhaps tier-up has been requested
+ // and we need to kick off a compilation job; or optimized code is available
+ // and should be tail-called.
+ //
+ // Currently we delegate these tasks to the InterpreterEntryTrampoline.
+ // TODO(jgruber,v8:8888): Consider a dedicated builtin instead.
+
+ const int parameter_count =
+ StartNode{graph()->start()}.FormalParameterCount();
+ TNode<HeapObject> code =
+ __ HeapConstant(BUILTIN_CODE(isolate(), InterpreterEntryTrampoline));
+ Node* target = __ Parameter(Linkage::kJSCallClosureParamIndex);
+ Node* new_target =
+ __ Parameter(Linkage::GetJSCallNewTargetParamIndex(parameter_count));
+ Node* argc =
+ __ Parameter(Linkage::GetJSCallArgCountParamIndex(parameter_count));
+ Node* context =
+ __ Parameter(Linkage::GetJSCallContextParamIndex(parameter_count));
+
+ JSTrampolineDescriptor descriptor;
+ CallDescriptor::Flags flags = CallDescriptor::kFixedTargetRegister |
+ CallDescriptor::kIsTailCallForTierUp;
+ auto call_descriptor = Linkage::GetStubCallDescriptor(
+ graph()->zone(), descriptor, descriptor.GetStackParameterCount(), flags,
+ Operator::kNoProperties);
+ Node* nodes[] = {code, target, new_target, argc,
+ context, __ effect(), __ control()};
+
+#ifdef DEBUG
+ static constexpr int kCodeContextEffectControl = 4;
+ DCHECK_EQ(arraysize(nodes),
+ descriptor.GetParameterCount() + kCodeContextEffectControl);
+#endif // DEBUG
+
+ __ TailCall(call_descriptor, arraysize(nodes), nodes);
+
+ __ Bind(&fallthrough);
+}
+
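
The two branches above only leave the fast path when the marker is neither kNone nor kInOptimizationQueue; the TODO further up suggests collapsing them into a single bit test. A minimal sketch of both shapes, using made-up enum values (the real OptimizationMarker encoding lives elsewhere in V8):

    #include <cstdint>

    // Made-up values; only the low-bit layout of the second variant matters.
    enum class Marker : uint8_t {
      kNone = 0,
      kInOptimizationQueue = 2,
      kCompileOptimized = 1,
      kCompileOptimizedConcurrent = 3,
    };

    // Shape of the current lowering: two explicit comparisons.
    bool RequiresActionTwoChecks(Marker m) {
      return m != Marker::kNone && m != Marker::kInOptimizationQueue;
    }

    // Shape suggested by the TODO: encode "interesting" in the least
    // significant bit so a single test suffices.
    bool RequiresActionOneCheck(Marker m) {
      return (static_cast<uint8_t>(m) & 1) != 0;
    }
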
+void EffectControlLinearizer::LowerUpdateInterruptBudget(Node* node) {
UpdateInterruptBudgetNode n(node);
TNode<FeedbackCell> feedback_cell = n.feedback_cell();
TNode<Int32T> budget = __ LoadField<Int32T>(
AccessBuilder::ForFeedbackCellInterruptBudget(), feedback_cell);
Node* new_budget = __ Int32Add(budget, __ Int32Constant(n.delta()));
+ __ StoreField(AccessBuilder::ForFeedbackCellInterruptBudget(), feedback_cell,
+ new_budget);
if (n.delta() < 0) {
auto next = __ MakeLabel();
auto if_budget_exhausted = __ MakeDeferredLabel();
@@ -3697,9 +3834,6 @@ Node* EffectControlLinearizer::LowerUpdateInterruptBudget(Node* node) {
__ Bind(&next);
}
- __ StoreField(AccessBuilder::ForFeedbackCellInterruptBudget(), feedback_cell,
- new_budget);
- return nullptr;
}
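
With the StoreField hoisted above the check, the budget is written back unconditionally and only decrements (delta < 0) can take the deferred exhausted path. A tiny standalone model of that flow; the exhaustion condition is assumed here to be the budget dropping below zero:

    #include <cstdint>

    // Returns true when the decremented budget is exhausted and the runtime
    // would be called; the new budget is stored back in either case.
    bool UpdateInterruptBudget(int32_t* budget, int32_t delta) {
      *budget += delta;
      return delta < 0 && *budget < 0;
    }
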
Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
@@ -3716,6 +3850,14 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ return ChangeIntPtrToSmi(
+ __ Load(MachineType::Pointer(), __ LoadFramePointer(),
+ __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+#else
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
+ Node* frame = __ LoadFramePointer();
+
Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
int formal_parameter_count = FormalParameterCountOf(node->op());
DCHECK_LE(0, formal_parameter_count);
@@ -3724,9 +3866,6 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
// We have to distinguish the case when there is an arguments adaptor frame
// (i.e., arguments_frame != LoadFramePointer()).
auto if_adaptor_frame = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
-
- Node* frame = __ LoadFramePointer();
__ GotoIf(__ TaggedEqual(arguments_frame, frame), &done,
__ SmiConstant(formal_parameter_count));
__ Goto(&if_adaptor_frame);
@@ -3736,24 +3875,30 @@ Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
MachineType::Pointer(), arguments_frame,
__ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
__ Goto(&done, arguments_length);
-
__ Bind(&done);
return done.PhiAt(0);
+#endif
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
- Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
int formal_parameter_count = FormalParameterCountOf(node->op());
DCHECK_LE(0, formal_parameter_count);
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
+ Node* frame = __ LoadFramePointer();
+
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ Node* arguments_length = ChangeIntPtrToSmi(
+ __ Load(MachineType::Pointer(), frame,
+ __ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+#else
+ Node* arguments_frame = NodeProperties::GetValueInput(node, 0);
+
// The RestLength node is computing the number of rest parameters,
// which is max(0, actual_parameter_count - formal_parameter_count).
+ // We have to distinguish the case when there is an arguments adaptor frame
// (i.e., arguments_frame != LoadFramePointer()).
auto if_adaptor_frame = __ MakeLabel();
- auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned);
-
- Node* frame = __ LoadFramePointer();
__ GotoIf(__ TaggedEqual(arguments_frame, frame), &done, __ SmiConstant(0));
__ Goto(&if_adaptor_frame);
@@ -3761,6 +3906,7 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = __ BitcastWordToTaggedSigned(__ Load(
MachineType::Pointer(), arguments_frame,
__ IntPtrConstant(ArgumentsAdaptorFrameConstants::kLengthOffset)));
+#endif
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
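
For reference, the value described by the RestLength comment above reduces to a clamped subtraction; a one-line standalone version:

    #include <algorithm>

    // max(0, actual_parameter_count - formal_parameter_count), as stated in
    // the comment above.
    int RestLength(int actual_parameter_count, int formal_parameter_count) {
      return std::max(0, actual_parameter_count - formal_parameter_count);
    }
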
@@ -4019,7 +4165,11 @@ Node* EffectControlLinearizer::LowerNumberSameValue(Node* node) {
Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
if (input->opcode() != IrOpcode::kUnreachable) {
- Node* unreachable = __ Unreachable();
+ // There is no fundamental reason not to connect to end here as well; not
+ // doing so simply fits more easily into how the graph is constructed at
+ // this point.
+ // TODO(jgruber): Connect to end here as well.
+ Node* unreachable = __ UnreachableWithoutConnectToEnd();
NodeProperties::ReplaceValueInput(node, unreachable, 0);
}
return gasm()->AddNode(node);
@@ -5104,8 +5254,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
}
builder.AddParam(MachineType::Pointer()); // has_error
- CallDescriptor* call_descriptor = Linkage::GetSimplifiedCDescriptor(
- graph()->zone(), builder.Build(), CallDescriptor::kNoFlags);
+ CallDescriptor* call_descriptor =
+ Linkage::GetSimplifiedCDescriptor(graph()->zone(), builder.Build());
call_descriptor->SetCFunctionInfo(c_signature);
@@ -5698,9 +5848,7 @@ void EffectControlLinearizer::LowerTransitionAndStoreNumberElement(Node* node) {
// loop peeling can break this assumption.
__ GotoIf(__ Word32Equal(kind, __ Int32Constant(HOLEY_DOUBLE_ELEMENTS)),
&do_store);
- // TODO(turbofan): It would be good to have an "Unreachable()" node type.
- __ DebugBreak();
- __ Goto(&do_store);
+ __ Unreachable(&do_store);
}
__ Bind(&transition_smi_array); // deferred code.
@@ -5878,7 +6026,7 @@ Node* EffectControlLinearizer::LowerAssertType(Node* node) {
Node* const min = __ NumberConstant(range->Min());
Node* const max = __ NumberConstant(range->Max());
CallBuiltin(Builtins::kCheckNumberInRange, node->op()->properties(), input,
- min, max);
+ min, max, __ SmiConstant(node->id()));
return input;
}
@@ -6438,6 +6586,13 @@ Node* EffectControlLinearizer::BuildIsStrongReference(Node* value) {
__ Int32Constant(kHeapObjectTag));
}
+Node* EffectControlLinearizer::MakeWeakForComparison(Node* heap_object) {
+ // TODO(gsathya): Specialize this for pointer compression.
+ return __ BitcastWordToTagged(
+ __ WordOr(__ BitcastTaggedToWord(heap_object),
+ __ IntPtrConstant(kWeakHeapObjectTag)));
+}
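
MakeWeakForComparison only tags the word so it compares equal to a weak reference to the same object. A standalone illustration of that bit trick with an assumed low-bit tag value (not V8's actual kWeakHeapObjectTag constant):

    #include <cstdint>

    // Assumed tag layout for illustration: aligned pointers leave their low
    // bits free, and OR-ing in the weak tag marks the reference as weak.
    constexpr uintptr_t kIllustrativeWeakTag = 0x3;

    uintptr_t MakeWeakForComparison(uintptr_t strong_tagged) {
      return strong_tagged | kIllustrativeWeakTag;
    }
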
+
Node* EffectControlLinearizer::BuildStrongReferenceFromWeakReference(
Node* maybe_object) {
return __ BitcastWordToTagged(
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index b3c348c579..89a8d4e118 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -318,8 +318,13 @@ void EscapeAnalysisReducer::Finalize() {
#ifdef V8_REVERSE_JSARGS
Node* offset_to_first_elem = jsgraph()->Constant(
CommonFrameConstants::kFixedSlotCountAboveFp);
- NodeProperties::SetType(offset_to_first_elem,
- TypeCache::Get()->kArgumentsLengthType);
+ if (!NodeProperties::IsTyped(offset_to_first_elem)) {
+ NodeProperties::SetType(
+ offset_to_first_elem,
+ Type::Constant(CommonFrameConstants::kFixedSlotCountAboveFp,
+ jsgraph()->graph()->zone()));
+ }
+
Node* offset = jsgraph()->graph()->NewNode(
jsgraph()->simplified()->NumberAdd(), index,
offset_to_first_elem);
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index ae999b6615..975efedf0f 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -32,6 +32,7 @@ class GraphAssembler::BasicBlockUpdater {
void AddBranch(Node* branch, BasicBlock* tblock, BasicBlock* fblock);
void AddGoto(BasicBlock* to);
void AddGoto(BasicBlock* from, BasicBlock* to);
+ void AddTailCall(Node* node);
void StartBlock(BasicBlock* block);
BasicBlock* Finalize(BasicBlock* original);
@@ -267,6 +268,18 @@ void GraphAssembler::BasicBlockUpdater::AddGoto(BasicBlock* from,
current_block_ = nullptr;
}
+void GraphAssembler::BasicBlockUpdater::AddTailCall(Node* node) {
+ DCHECK_EQ(node->opcode(), IrOpcode::kTailCall);
+ DCHECK_NOT_NULL(current_block_);
+
+ if (state_ == kUnchanged) {
+ CopyForChange();
+ }
+
+ schedule_->AddTailCall(current_block_, node);
+ current_block_ = nullptr;
+}
+
void GraphAssembler::BasicBlockUpdater::UpdateSuccessors(BasicBlock* block) {
for (SuccessorInfo succ : saved_successors_) {
(succ.block->predecessors())[succ.index] = block;
@@ -316,12 +329,15 @@ BasicBlock* GraphAssembler::BasicBlockUpdater::Finalize(BasicBlock* original) {
return block;
}
-GraphAssembler::GraphAssembler(MachineGraph* mcgraph, Zone* zone,
- Schedule* schedule, bool mark_loop_exits)
+GraphAssembler::GraphAssembler(
+ MachineGraph* mcgraph, Zone* zone,
+ base::Optional<NodeChangedCallback> node_changed_callback,
+ Schedule* schedule, bool mark_loop_exits)
: temp_zone_(zone),
mcgraph_(mcgraph),
effect_(nullptr),
control_(nullptr),
+ node_changed_callback_(node_changed_callback),
block_updater_(schedule != nullptr
? new BasicBlockUpdater(schedule, mcgraph->graph(),
mcgraph->common(), zone)
@@ -380,6 +396,11 @@ Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
return AddClonedNode(mcgraph()->ExternalConstant(ref));
}
+Node* GraphAssembler::Parameter(int index) {
+ return AddNode(
+ graph()->NewNode(common()->Parameter(index), graph()->start()));
+}
+
Node* JSGraphAssembler::CEntryStubConstant(int result_size) {
return AddClonedNode(jsgraph()->CEntryStubConstant(result_size));
}
@@ -624,7 +645,20 @@ Node* GraphAssembler::DebugBreak() {
graph()->NewNode(machine()->DebugBreak(), effect(), control()));
}
-Node* GraphAssembler::Unreachable() {
+Node* GraphAssembler::Unreachable(
+ GraphAssemblerLabel<0u>* block_updater_successor) {
+ Node* result = UnreachableWithoutConnectToEnd();
+ if (block_updater_ == nullptr) {
+ ConnectUnreachableToEnd();
+ InitializeEffectControl(nullptr, nullptr);
+ } else {
+ DCHECK_NOT_NULL(block_updater_successor);
+ Goto(block_updater_successor);
+ }
+ return result;
+}
+
+Node* GraphAssembler::UnreachableWithoutConnectToEnd() {
return AddNode(
graph()->NewNode(common()->Unreachable(), effect(), control()));
}
@@ -685,9 +719,9 @@ Node* GraphAssembler::UnsafePointerAdd(Node* base, Node* external) {
}
TNode<Number> JSGraphAssembler::PlainPrimitiveToNumber(TNode<Object> value) {
- return AddNode<Number>(graph()->NewNode(PlainPrimitiveToNumberOperator(),
- ToNumberBuiltinConstant(), value,
- NoContextConstant(), effect()));
+ return AddNode<Number>(graph()->NewNode(
+ PlainPrimitiveToNumberOperator(), PlainPrimitiveToNumberBuiltinConstant(),
+ value, effect()));
}
Node* GraphAssembler::BitcastWordToTaggedSigned(Node* value) {
@@ -768,6 +802,28 @@ TNode<Object> GraphAssembler::Call(const Operator* op, int inputs_size,
return AddNode<Object>(graph()->NewNode(op, inputs_size, inputs));
}
+void GraphAssembler::TailCall(const CallDescriptor* call_descriptor,
+ int inputs_size, Node** inputs) {
+#ifdef DEBUG
+ static constexpr int kTargetEffectControl = 3;
+ DCHECK_EQ(inputs_size,
+ call_descriptor->ParameterCount() + kTargetEffectControl);
+#endif // DEBUG
+
+ Node* node = AddNode(graph()->NewNode(common()->TailCall(call_descriptor),
+ inputs_size, inputs));
+
+ if (block_updater_) block_updater_->AddTailCall(node);
+
+ // Unlike ConnectUnreachableToEnd, the TailCall node terminates a block; to
+ // keep it live, it *must* be connected to End (also in Turboprop schedules).
+ NodeProperties::MergeControlToEnd(graph(), common(), node);
+
+ // Setting effect and control to nullptr effectively terminates the current
+ // block by disallowing the addition of new nodes until a new label has been
+ // bound.
+ InitializeEffectControl(nullptr, nullptr);
+}
+
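
The DEBUG block above pins down the expected input layout: the descriptor's parameters plus the call target and the effect and control dependencies. A compile-time restatement of that counting rule (standalone, not the V8 DCHECK machinery):

    #include <cstddef>

    // target + effect + control are appended to the descriptor's parameters.
    constexpr size_t kTargetEffectControl = 3;

    constexpr bool TailCallInputCountMatches(size_t inputs_size,
                                             size_t parameter_count) {
      return inputs_size == parameter_count + kTargetEffectControl;
    }

    // Example: a descriptor with 4 parameters yields 7 tail-call inputs.
    static_assert(TailCallInputCountMatches(7, 4),
                  "4 params + target/effect/control");
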
void GraphAssembler::BranchWithCriticalSafetyCheck(
Node* condition, GraphAssemblerLabel<0u>* if_true,
GraphAssemblerLabel<0u>* if_false) {
@@ -860,10 +916,13 @@ void GraphAssembler::ConnectUnreachableToEnd() {
// to disconnect them from the graph, so we just leave the unreachable nodes
// in the schedule.
// TODO(9684): Add a scheduled dead-code elimination phase to remove all the
- // subsiquent unreacahble code from the schedule.
+ // subsequent unreachable code from the schedule.
if (!block_updater_) {
Node* throw_node = graph()->NewNode(common()->Throw(), effect(), control());
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ if (node_changed_callback_.has_value()) {
+ (*node_changed_callback_)(graph()->end());
+ }
effect_ = control_ = mcgraph()->Dead();
}
}
@@ -906,7 +965,8 @@ void GraphAssembler::InitializeEffectControl(Node* effect, Node* control) {
Operator const* JSGraphAssembler::PlainPrimitiveToNumberOperator() {
if (!to_number_operator_.is_set()) {
- Callable callable = Builtins::CallableFor(isolate(), Builtins::kToNumber);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kPlainPrimitiveToNumber);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 2b2dbb5d67..1be52317c0 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -91,6 +91,7 @@ class BasicBlock;
V(Word64Or) \
V(WordAnd) \
V(WordEqual) \
+ V(WordOr) \
V(WordSar) \
V(WordSarShiftOutZeros) \
V(WordShl) \
@@ -126,6 +127,7 @@ class BasicBlock;
V(One, Number) \
V(TheHole, Oddball) \
V(ToNumberBuiltin, Code) \
+ V(PlainPrimitiveToNumberBuiltin, Code) \
V(True, Boolean) \
V(Undefined, Oddball) \
V(Zero, Number)
@@ -182,12 +184,15 @@ class GraphAssemblerLabel {
const std::array<MachineRepresentation, VarCount> representations_;
};
+using NodeChangedCallback = std::function<void(Node*)>;
class V8_EXPORT_PRIVATE GraphAssembler {
public:
// Constructs a GraphAssembler. If {schedule} is not null, the graph assembler
// will maintain the schedule as it updates blocks.
- GraphAssembler(MachineGraph* jsgraph, Zone* zone,
- Schedule* schedule = nullptr, bool mark_loop_exits = false);
+ GraphAssembler(
+ MachineGraph* jsgraph, Zone* zone,
+ base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
+ Schedule* schedule = nullptr, bool mark_loop_exits = false);
virtual ~GraphAssembler();
void Reset(BasicBlock* block);
@@ -239,6 +244,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Projection(int index, Node* value);
Node* ExternalConstant(ExternalReference ref);
+ Node* Parameter(int index);
+
Node* LoadFramePointer();
Node* LoadHeapNumberValue(Node* heap_number);
@@ -252,10 +259,20 @@ class V8_EXPORT_PRIVATE GraphAssembler {
CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
#undef BINOP_DECL
- // Debugging
Node* DebugBreak();
- Node* Unreachable();
+ // Unreachable nodes are similar to Goto in that they reset effect/control to
+ // nullptr and it's thus not possible to append other nodes without first
+ // binding a new label.
+ // The block_updater_successor label is a crutch to work around block updater
+ // weaknesses (see the related comment in ConnectUnreachableToEnd); if the
+ // block updater exists, we cannot connect unreachable to end and must
+ // instead preserve the Goto pattern.
+ Node* Unreachable(GraphAssemblerLabel<0u>* block_updater_successor = nullptr);
+ // This variant neither connects the Unreachable node to end nor resets the
+ // current effect/control. It is intended only for special use-cases such as
+ // lowering DeadValue.
+ Node* UnreachableWithoutConnectToEnd();
Node* IntPtrEqual(Node* left, Node* right);
Node* TaggedEqual(Node* left, Node* right);
@@ -315,6 +332,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Args... args);
template <typename... Args>
TNode<Object> Call(const Operator* op, Node* first_arg, Args... args);
+ void TailCall(const CallDescriptor* call_descriptor, int inputs_size,
+ Node** inputs);
// Basic control operations.
template <size_t VarCount>
@@ -350,6 +369,13 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void GotoIfNot(Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* label,
Vars...);
+ bool HasActiveBlock() const {
+ // This is false if the current block has been terminated (e.g. by a Goto or
+ // Unreachable). In that case, a new label must be bound before we can
+ // continue emitting nodes.
+ return control() != nullptr;
+ }
+
// Updates current effect and control based on outputs of {node}.
V8_INLINE void UpdateEffectControlWith(Node* node) {
if (node->op()->EffectOutputCount() > 0) {
@@ -375,8 +401,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void ConnectUnreachableToEnd();
- Control control() { return Control(control_); }
- Effect effect() { return Effect(effect_); }
+ Control control() const { return Control(control_); }
+ Effect effect() const { return Effect(effect_); }
protected:
class BasicBlockUpdater;
@@ -485,6 +511,9 @@ class V8_EXPORT_PRIVATE GraphAssembler {
MachineGraph* mcgraph_;
Node* effect_;
Node* control_;
+ // {node_changed_callback_} should be called when a node outside the
+ // subgraph created by the graph assembler changes.
+ base::Optional<NodeChangedCallback> node_changed_callback_;
std::unique_ptr<BasicBlockUpdater> block_updater_;
// Track loop information in order to properly mark loop exits with
@@ -753,9 +782,12 @@ class V8_EXPORT_PRIVATE JSGraphAssembler : public GraphAssembler {
public:
// Constructs a JSGraphAssembler. If {schedule} is not null, the graph
// assembler will maintain the schedule as it updates blocks.
- JSGraphAssembler(JSGraph* jsgraph, Zone* zone, Schedule* schedule = nullptr,
- bool mark_loop_exits = false)
- : GraphAssembler(jsgraph, zone, schedule, mark_loop_exits),
+ JSGraphAssembler(
+ JSGraph* jsgraph, Zone* zone,
+ base::Optional<NodeChangedCallback> node_changed_callback = base::nullopt,
+ Schedule* schedule = nullptr, bool mark_loop_exits = false)
+ : GraphAssembler(jsgraph, zone, node_changed_callback, schedule,
+ mark_loop_exits),
jsgraph_(jsgraph) {}
Node* SmiConstant(int32_t value);
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 17789a0432..155d6fa8ef 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -2,12 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/graph-reducer.h"
+
#include <functional>
#include <limits>
#include "src/codegen/tick-counter.h"
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/verifier.h"
@@ -27,14 +29,15 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
GraphReducer::GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- Node* dead)
+ JSHeapBroker* broker, Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
stack_(zone),
- tick_counter_(tick_counter) {
+ tick_counter_(tick_counter),
+ broker_(broker) {
if (dead != nullptr) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -94,6 +97,9 @@ Reduction GraphReducer::Reduce(Node* const node) {
// all the other reducers for this node, as now there may be more
// opportunities for reduction.
if (FLAG_trace_turbo_reduction) {
+ UnparkedScopeIfNeeded unparked(broker_);
+ // TODO(neis): Disallow racy handle dereference once we stop
+ // supporting --no-local-heaps --no-turbo-direct-heap-access.
AllowHandleDereference allow_deref;
StdoutStream{} << "- In-place update of #" << *node << " by reducer "
<< (*i)->reducer_name() << std::endl;
@@ -104,6 +110,9 @@ Reduction GraphReducer::Reduce(Node* const node) {
} else {
// {node} was replaced by another node.
if (FLAG_trace_turbo_reduction) {
+ UnparkedScopeIfNeeded unparked(broker_);
+ // TODO(neis): Disallow racy handle dereference once we stop
+ // supporting --no-local-heaps --no-turbo-direct-heap-access.
AllowHandleDereference allow_deref;
StdoutStream{} << "- Replacement of #" << *node << " with #"
<< *(reduction.replacement()) << " by reducer "
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index 3c15214d93..95454098d5 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -17,8 +17,8 @@ class TickCounter;
namespace compiler {
-// Forward declarations.
class Graph;
+class JSHeapBroker;
class Node;
// NodeIds are identifying numbers for nodes that can be used to index auxiliary
@@ -136,7 +136,7 @@ class V8_EXPORT_PRIVATE GraphReducer
: public NON_EXPORTED_BASE(AdvancedReducer::Editor) {
public:
GraphReducer(Zone* zone, Graph* graph, TickCounter* tick_counter,
- Node* dead = nullptr);
+ JSHeapBroker* broker, Node* dead = nullptr);
~GraphReducer() override;
Graph* graph() const { return graph_; }
@@ -189,6 +189,7 @@ class V8_EXPORT_PRIVATE GraphReducer
ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
TickCounter* const tick_counter_;
+ JSHeapBroker* const broker_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
};
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index f767e9ea4d..36372f5d02 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -774,10 +774,7 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
}
}
- // The toplevel range might be a splinter. Pre-resolve those here so that
- // they have a proper parent.
const TopLevelLiveRange* parent = range->TopLevel();
- if (parent->IsSplinter()) parent = parent->splintered_from();
os_ << " " << parent->vreg() << ":" << parent->relative_id();
// TODO(herhut) Find something useful to print for the hint field
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 137f61cc49..f66b678632 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -10,6 +10,7 @@
#include "src/objects/elements-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/instance-type.h"
+#include "src/utils/boxed-float.h"
namespace v8 {
class CFunctionInfo;
@@ -48,56 +49,67 @@ enum class OddballType : uint8_t {
};
// This list is sorted such that subtypes appear before their supertypes.
+// This list must not contain a type if it doesn't contain all of its subtypes
+// too. For example, it CANNOT contain FixedArrayBase if it doesn't contain
+// FixedDoubleArray, BytecodeArray and FixedArray.
+// DO NOT VIOLATE THESE TWO PROPERTIES!
+// Classes on this list will skip serialization when
+// FLAG_turbo_direct_heap_access is on. Otherwise, they might get serialized.
+#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of FixedArray */ \
+ V(ObjectBoilerplateDescription) \
+ /* Subtypes of HeapObject */ \
+ V(AccessorInfo) \
+ V(ArrayBoilerplateDescription) \
+ V(Cell) \
+ V(TemplateObjectDescription)
+
+// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray) \
- V(JSBoundFunction) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(JSRegExp) \
- V(JSTypedArray) \
- /* Subtypes of Context */ \
- V(NativeContext) \
- /* Subtypes of FixedArray */ \
- V(Context) \
- V(ObjectBoilerplateDescription) \
- V(ScopeInfo) \
- V(ScriptContextTable) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
- V(FixedArray) \
- V(FixedDoubleArray) \
- /* Subtypes of Name */ \
- V(InternalizedString) \
- V(String) \
- V(Symbol) \
- /* Subtypes of JSReceiver */ \
- V(JSObject) \
- /* Subtypes of HeapObject */ \
- V(AccessorInfo) \
- V(AllocationSite) \
- V(ArrayBoilerplateDescription) \
- V(BigInt) \
- V(CallHandlerInfo) \
- V(Cell) \
- V(Code) \
- V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FixedArrayBase) \
- V(FunctionTemplateInfo) \
- V(HeapNumber) \
- V(JSReceiver) \
- V(Map) \
- V(Name) \
- V(PropertyCell) \
- V(SharedFunctionInfo) \
- V(SourceTextModule) \
- V(TemplateObjectDescription) \
- /* Subtypes of Object */ \
+#define HEAP_BROKER_SERIALIZED_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray) \
+ V(JSBoundFunction) \
+ V(JSDataView) \
+ V(JSFunction) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(JSRegExp) \
+ V(JSTypedArray) \
+ /* Subtypes of Context */ \
+ V(NativeContext) \
+ /* Subtypes of FixedArray */ \
+ V(Context) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ /* Subtypes of Name */ \
+ V(InternalizedString) \
+ V(String) \
+ V(Symbol) \
+ /* Subtypes of JSReceiver */ \
+ V(JSObject) \
+ /* Subtypes of HeapObject */ \
+ V(AllocationSite) \
+ V(BigInt) \
+ V(CallHandlerInfo) \
+ V(Code) \
+ V(DescriptorArray) \
+ V(FeedbackCell) \
+ V(FeedbackVector) \
+ V(FixedArrayBase) \
+ V(FunctionTemplateInfo) \
+ V(HeapNumber) \
+ V(JSReceiver) \
+ V(Map) \
+ V(Name) \
+ V(PropertyCell) \
+ V(SharedFunctionInfo) \
+ V(SourceTextModule) \
+ /* Subtypes of Object */ \
V(HeapObject)
class CompilationDependencies;
@@ -107,7 +119,8 @@ class ObjectData;
class PerIsolateCompilerCache;
class PropertyAccessInfo;
#define FORWARD_DECL(Name) class Name##Ref;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
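
The split into two lists keeps the familiar X-macro pattern: each list is expanded several times with a different per-name macro (forward declarations here, Is/As declarations further down). A generic standalone illustration of that pattern, with placeholder names:

    // One list of names, expanded multiple times.
    #define MY_OBJECT_LIST(V) \
      V(Foo)                  \
      V(Bar)

    // Expansion 1: forward declarations (FooRef, BarRef).
    #define FORWARD_DECL(Name) class Name##Ref;
    MY_OBJECT_LIST(FORWARD_DECL)
    #undef FORWARD_DECL

    // Expansion 2: per-name predicate declarations (IsFoo(), IsBar()).
    class ObjectRefLike {
     public:
    #define IS_DECL(Name) bool Is##Name() const;
      MY_OBJECT_LIST(IS_DECL)
    #undef IS_DECL
    };
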
class V8_EXPORT_PRIVATE ObjectRef {
@@ -127,11 +140,13 @@ class V8_EXPORT_PRIVATE ObjectRef {
int AsSmi() const;
#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
- HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
bool IsNullOrUndefined() const;
@@ -293,7 +308,6 @@ class JSDataViewRef : public JSObjectRef {
Handle<JSDataView> object() const;
size_t byte_length() const;
- size_t byte_offset() const;
};
class JSBoundFunctionRef : public JSObjectRef {
@@ -704,8 +718,7 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
Handle<FixedDoubleArray> object() const;
- double get_scalar(int i) const;
- bool is_the_hole(int i) const;
+ Float64 get(int i) const;
};
class BytecodeArrayRef : public FixedArrayBaseRef {
@@ -722,8 +735,7 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
uint8_t get(int index) const;
Address GetFirstBytecodeAddress() const;
- // Source position table.
- Handle<ByteArray> source_positions() const;
+ Handle<ByteArray> SourcePositionTable() const;
// Constant pool access.
Handle<Object> GetConstantAtIndex(int index) const;
@@ -877,8 +889,6 @@ class CellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(Cell, HeapObjectRef)
Handle<Cell> object() const;
-
- ObjectRef value() const;
};
class JSGlobalObjectRef : public JSObjectRef {
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 0a48bcbcc6..94a6b3a7c7 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -51,10 +51,15 @@ class JSCallReducerAssembler : public JSGraphAssembler {
static constexpr bool kMarkLoopExits = true;
public:
- JSCallReducerAssembler(JSGraph* jsgraph, Zone* zone, Node* node)
- : JSGraphAssembler(jsgraph, zone, nullptr, kMarkLoopExits),
+ JSCallReducerAssembler(JSCallReducer* reducer, Node* node)
+ : JSGraphAssembler(
+ reducer->JSGraphForGraphAssembler(),
+ reducer->ZoneForGraphAssembler(),
+ [reducer](Node* n) { reducer->RevisitForGraphAssembler(n); },
+ nullptr, kMarkLoopExits),
node_(node),
- outermost_catch_scope_(CatchScope::Outermost(zone)),
+ outermost_catch_scope_(
+ CatchScope::Outermost(reducer->ZoneForGraphAssembler())),
catch_scope_(&outermost_catch_scope_) {
InitializeEffectControl(NodeProperties::GetEffectInput(node),
NodeProperties::GetControlInput(node));
@@ -143,11 +148,11 @@ class JSCallReducerAssembler : public JSGraphAssembler {
gasm_->Bind(&if_true);
if (then_body_) then_body_();
- gasm_->Goto(&merge);
+ if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&if_false);
if (else_body_) else_body_();
- gasm_->Goto(&merge);
+ if (gasm_->HasActiveBlock()) gasm_->Goto(&merge);
gasm_->Bind(&merge);
}
@@ -209,11 +214,13 @@ class JSCallReducerAssembler : public JSGraphAssembler {
gasm_->Bind(&if_true);
TNode<T> then_result = then_body_();
- gasm_->Goto(&merge, then_result);
+ if (gasm_->HasActiveBlock()) gasm_->Goto(&merge, then_result);
gasm_->Bind(&if_false);
TNode<T> else_result = else_body_();
- gasm_->Goto(&merge, else_result);
+ if (gasm_->HasActiveBlock()) {
+ gasm_->Goto(&merge, else_result);
+ }
gasm_->Bind(&merge);
return merge.PhiAt<T>(0);
@@ -658,9 +665,8 @@ enum class ArrayIndexOfIncludesVariant { kIncludes, kIndexOf };
// builtins.
class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
public:
- IteratingArrayBuiltinReducerAssembler(JSGraph* jsgraph, Zone* zone,
- Node* node)
- : JSCallReducerAssembler(jsgraph, zone, node) {
+ IteratingArrayBuiltinReducerAssembler(JSCallReducer* reducer, Node* node)
+ : JSCallReducerAssembler(reducer, node) {
DCHECK(FLAG_turbo_inline_array_builtins);
}
@@ -784,9 +790,9 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
public:
- PromiseBuiltinReducerAssembler(JSGraph* jsgraph, Zone* zone, Node* node,
+ PromiseBuiltinReducerAssembler(JSCallReducer* reducer, Node* node,
JSHeapBroker* broker)
- : JSCallReducerAssembler(jsgraph, zone, node), broker_(broker) {
+ : JSCallReducerAssembler(reducer, node), broker_(broker) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
}
@@ -876,12 +882,12 @@ class PromiseBuiltinReducerAssembler : public JSCallReducerAssembler {
class FastApiCallReducerAssembler : public JSCallReducerAssembler {
public:
FastApiCallReducerAssembler(
- JSGraph* jsgraph, Zone* zone, Node* node, Address c_function,
+ JSCallReducer* reducer, Node* node, Address c_function,
const CFunctionInfo* c_signature,
const FunctionTemplateInfoRef function_template_info, Node* receiver,
Node* holder, const SharedFunctionInfoRef shared, Node* target,
const int arity, Node* effect)
- : JSCallReducerAssembler(jsgraph, zone, node),
+ : JSCallReducerAssembler(reducer, node),
c_function_(c_function),
c_signature_(c_signature),
function_template_info_(function_template_info),
@@ -1420,7 +1426,6 @@ TNode<Object> IteratingArrayBuiltinReducerAssembler::ReduceArrayPrototypeReduce(
Bind(&continue_label);
});
Unreachable(); // The loop is exited either by deopt or a jump to below.
- InitializeEffectControl(nullptr, nullptr);
// TODO(jgruber): This manual fiddling with blocks could be avoided by
// implementing a `break` mechanic for loop builders.
@@ -2244,7 +2249,7 @@ Reduction JSCallReducer::ReduceMathUnary(Node* node, const Operator* op) {
return Replace(value);
}
- JSCallReducerAssembler a(jsgraph(), temp_zone(), node);
+ JSCallReducerAssembler a(this, node);
Node* subgraph = a.ReduceMathUnary(op);
return ReplaceWithSubgraph(&a, subgraph);
}
@@ -2261,7 +2266,7 @@ Reduction JSCallReducer::ReduceMathBinary(Node* node, const Operator* op) {
return Replace(value);
}
- JSCallReducerAssembler a(jsgraph(), temp_zone(), node);
+ JSCallReducerAssembler a(this, node);
Node* subgraph = a.ReduceMathBinary(op);
return ReplaceWithSubgraph(&a, subgraph);
}
@@ -3283,7 +3288,7 @@ Reduction JSCallReducer::ReduceArrayForEach(
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeForEach(
h.inference(), h.has_stability_dependency(), h.elements_kind(), shared);
@@ -3296,7 +3301,7 @@ Reduction JSCallReducer::ReduceArrayReduce(
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeReduce(
h.inference(), h.has_stability_dependency(), h.elements_kind(),
@@ -3310,7 +3315,7 @@ Reduction JSCallReducer::ReduceArrayReduceRight(
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeReduce(
h.inference(), h.has_stability_dependency(), h.elements_kind(),
@@ -3329,7 +3334,7 @@ Reduction JSCallReducer::ReduceArrayMap(Node* node,
return h.inference()->NoChange();
}
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph =
@@ -3349,7 +3354,7 @@ Reduction JSCallReducer::ReduceArrayFilter(
return h.inference()->NoChange();
}
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph =
@@ -3364,7 +3369,7 @@ Reduction JSCallReducer::ReduceArrayFind(Node* node,
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeFind(
@@ -3379,7 +3384,7 @@ Reduction JSCallReducer::ReduceArrayFindIndex(
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeFind(
@@ -3394,7 +3399,7 @@ Reduction JSCallReducer::ReduceArrayEvery(Node* node,
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeEverySome(
@@ -3410,7 +3415,7 @@ Reduction JSCallReducer::ReduceArrayIncludes(Node* node) {
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeIndexOfIncludes(
@@ -3425,7 +3430,7 @@ Reduction JSCallReducer::ReduceArrayIndexOf(Node* node) {
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeIndexOfIncludes(
@@ -3439,7 +3444,7 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
IteratingArrayBuiltinHelper h(node, broker(), jsgraph(), dependencies());
if (!h.can_reduce()) return h.inference()->NoChange();
- IteratingArrayBuiltinReducerAssembler a(jsgraph(), temp_zone(), node);
+ IteratingArrayBuiltinReducerAssembler a(this, node);
a.InitializeEffectControl(h.effect(), h.control());
TNode<Object> subgraph = a.ReduceArrayPrototypeEverySome(
@@ -3623,9 +3628,9 @@ Reduction JSCallReducer::ReduceCallApiFunction(
if (FLAG_turbo_fast_api_calls && c_function != kNullAddress) {
const CFunctionInfo* c_signature = function_template_info.c_signature();
- FastApiCallReducerAssembler a(jsgraph(), graph()->zone(), node, c_function,
- c_signature, function_template_info, receiver,
- holder, shared, target, argc, effect);
+ FastApiCallReducerAssembler a(this, node, c_function, c_signature,
+ function_template_info, receiver, holder,
+ shared, target, argc, effect);
Node* fast_call_subgraph = a.ReduceFastApiCall();
ReplaceWithSubgraph(&a, fast_call_subgraph);
@@ -4804,7 +4809,7 @@ Reduction JSCallReducer::ReduceStringPrototypeSubstring(Node* node) {
return NoChange();
}
- JSCallReducerAssembler a(jsgraph(), temp_zone(), node);
+ JSCallReducerAssembler a(this, node);
Node* subgraph = a.ReduceStringPrototypeSubstring();
return ReplaceWithSubgraph(&a, subgraph);
}
@@ -4818,7 +4823,7 @@ Reduction JSCallReducer::ReduceStringPrototypeSlice(Node* node) {
return NoChange();
}
- JSCallReducerAssembler a(jsgraph(), temp_zone(), node);
+ JSCallReducerAssembler a(this, node);
Node* subgraph = a.ReduceStringPrototypeSlice();
return ReplaceWithSubgraph(&a, subgraph);
}
@@ -6316,7 +6321,7 @@ Reduction JSCallReducer::ReducePromiseConstructor(Node* node) {
if (broker()->is_native_context_independent()) return NoChange();
DisallowHeapAccessIf no_heap_access(should_disallow_heap_access());
- PromiseBuiltinReducerAssembler a(jsgraph(), temp_zone(), node, broker());
+ PromiseBuiltinReducerAssembler a(this, node, broker());
// We only inline when we have the executor.
if (a.ConstructArity() < 1) return NoChange();
@@ -7322,11 +7327,11 @@ Reduction JSCallReducer::ReduceDataViewAccess(Node* node, DataViewAccess access,
// We only deal with DataViews here whose [[ByteLength]] is at least
// {element_size}, as for all other DataViews it'll be out-of-bounds.
JSDataViewRef dataview = m.Ref(broker()).AsJSDataView();
- if (dataview.byte_length() < element_size) return NoChange();
+ size_t length = dataview.byte_length();
+ if (length < element_size) return NoChange();
- // Check that the {offset} is within range of the {byte_length}.
- Node* byte_length =
- jsgraph()->Constant(dataview.byte_length() - (element_size - 1));
+ // Check that the {offset} is within range of the {length}.
+ Node* byte_length = jsgraph()->Constant(length - (element_size - 1));
offset = effect = graph()->NewNode(simplified()->CheckBounds(p.feedback()),
offset, byte_length, effect, control);
} else {
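
The rewritten check keeps offsets valid for an element_size-byte access: the access fits iff offset < byte_length - (element_size - 1). A standalone restatement of that bound, including the early-out for views shorter than one element:

    #include <cstddef>

    // An element_size-byte access at offset fits iff
    // offset + element_size <= byte_length, i.e.
    // offset < byte_length - (element_size - 1).
    bool DataViewAccessInBounds(size_t offset, size_t byte_length,
                                size_t element_size) {
      if (byte_length < element_size) return false;
      return offset < byte_length - (element_size - 1);
    }
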
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 139ef7dfa4..fe5af04aa8 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -64,6 +64,11 @@ class V8_EXPORT_PRIVATE JSCallReducer final : public AdvancedReducer {
// and does a final attempt to reduce the nodes in the waitlist.
void Finalize() final;
+ // JSCallReducer outsources much work to a graph assembler.
+ void RevisitForGraphAssembler(Node* node) { Revisit(node); }
+ Zone* ZoneForGraphAssembler() const { return temp_zone(); }
+ JSGraph* JSGraphForGraphAssembler() const { return jsgraph(); }
+
private:
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node,
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 9fe968825f..1f3169fad3 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -28,7 +28,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects.h"
-#include "torque-generated/exported-class-definitions-tq.h"
+#include "torque-generated/exported-class-definitions.h"
namespace v8 {
namespace internal {
@@ -1770,10 +1770,11 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
if (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
FixedDoubleArrayRef elements = boilerplate_elements.AsFixedDoubleArray();
for (int i = 0; i < elements_length; ++i) {
- if (elements.is_the_hole(i)) {
+ Float64 value = elements.get(i);
+ if (value.is_hole_nan()) {
elements_values[i] = jsgraph()->TheHoleConstant();
} else {
- elements_values[i] = jsgraph()->Constant(elements.get_scalar(i));
+ elements_values[i] = jsgraph()->Constant(value.get_scalar());
}
}
} else {
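
The new FixedDoubleArrayRef::get returns a boxed Float64 so that the hole can be detected from the raw bits instead of via a separate is_the_hole query. A rough standalone model of such a box; the concrete hole bit pattern below is a placeholder, not V8's actual sentinel:

    #include <cstdint>
    #include <cstring>

    // Placeholder quiet-NaN payload standing in for the real hole sentinel.
    constexpr uint64_t kIllustrativeHoleBits = 0x7FF8DEADDEADBEEFull;

    struct Float64Box {
      uint64_t bits;
      bool is_hole_nan() const { return bits == kIllustrativeHoleBits; }
      double get_scalar() const {
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
      }
    };
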
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 0950248087..81bafa6183 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -315,6 +315,13 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
}
}
+void JSGenericLowering::LowerJSLoadNamedFromSuper(Node* node) {
+ JSLoadNamedFromSuperNode n(node);
+ NamedAccess const& p = n.Parameters();
+ node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.name()));
+ ReplaceWithRuntimeCall(node, Runtime::kLoadFromSuper);
+}
+
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
JSLoadGlobalNode n(node);
const LoadGlobalParameters& p = n.Parameters();
@@ -522,9 +529,14 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
+ auto interface_descriptor = ArrayConstructorDescriptor{};
auto call_descriptor = Linkage::GetStubCallDescriptor(
- zone(), ArrayConstructorDescriptor{}, arity + 1,
- CallDescriptor::kNeedsFrameState, node->op()->properties());
+ zone(), interface_descriptor, arity + 1, CallDescriptor::kNeedsFrameState,
+ node->op()->properties());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
Node* stub_arity = jsgraph()->Int32Constant(arity);
MaybeHandle<AllocationSite> const maybe_site = p.site();
@@ -773,6 +785,10 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -801,12 +817,20 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
arg_count + kReceiver + kMaybeFeedbackVector;
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kConstruct_WithFeedback);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
+ kMaybeFeedbackVector);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
Node* receiver = jsgraph()->UndefinedConstant();
+#ifdef V8_REVERSE_JSARGS
+ Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
+#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -815,10 +839,16 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone(), 5, feedback_vector);
+ node->InsertInput(zone(), 6, receiver);
+ // After: {code, target, new_target, arity, slot, vector, receiver,
+ // ...args}.
+#else
node->InsertInput(zone(), 5, receiver);
-
// After: {code, target, new_target, arity, slot, receiver, ...args,
// vector}.
+#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
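
Taken together with the RemoveInput above, the V8_REVERSE_JSARGS branch turns the node's value inputs into the documented layout {code, target, new_target, arity, slot, vector, receiver, ...args}. A small standalone simulation of those RemoveInput/InsertInput steps on a plain vector (value inputs only, assuming they start out as {target, new_target, ...args, vector}; the names are just labels), to make the index arithmetic visible:

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      using Inputs = std::vector<std::string>;
      // Before: {target, new_target, ...args, vector}, with two JS arguments.
      Inputs inputs = {"target", "new_target", "a0", "a1", "vector"};

      // RemoveInput(FeedbackVectorIndex()): the vector is the last value input.
      std::string vector = inputs.back();
      inputs.pop_back();

      auto insert = [&](size_t index, const std::string& value) {
        inputs.insert(inputs.begin() + index, value);
      };
      insert(0, "code");      // InsertInput(zone(), 0, stub_code)
      insert(3, "arity");     // InsertInput(zone(), 3, stub_arity)
      insert(4, "slot");      // InsertInput(zone(), 4, slot)
      insert(5, vector);      // InsertInput(zone(), 5, feedback_vector)
      insert(6, "receiver");  // InsertInput(zone(), 6, receiver)

      // After: {code, target, new_target, arity, slot, vector, receiver, ...args}.
      assert((inputs == Inputs{"code", "target", "new_target", "arity", "slot",
                               "vector", "receiver", "a0", "a1"}));
    }
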
@@ -857,12 +887,19 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
arg_count - kArgumentList + kReceiver + kMaybeFeedbackVector;
Callable callable = Builtins::CallableFor(
isolate(), Builtins::kConstructWithArrayLike_WithFeedback);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
+ kMaybeFeedbackVector);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* receiver = jsgraph()->UndefinedConstant();
Node* slot = jsgraph()->Int32Constant(p.feedback().index());
-
+#ifdef V8_REVERSE_JSARGS
+ Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
+#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
// the receiver is implicitly placed on the stack and is thus inserted
@@ -870,16 +907,26 @@ void JSGenericLowering::LowerJSConstructWithArrayLike(Node* node) {
// TODO(jgruber): Implement a simpler way to specify these mutations.
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 4, slot);
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone(), 5, feedback_vector);
+ node->InsertInput(zone(), 6, receiver);
+ // After: {code, target, new_target, arguments_list, slot, vector,
+ // receiver}.
+#else
node->InsertInput(zone(), 5, receiver);
-
// After: {code, target, new_target, arguments_list, slot, receiver,
// vector}.
+#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
const int stack_argument_count = arg_count - kArgumentList + kReceiver;
Callable callable =
Builtins::CallableFor(isolate(), Builtins::kConstructWithArrayLike);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -911,6 +958,11 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
arg_count + kReceiver + kMaybeFeedbackVector;
Callable callable = Builtins::CallableFor(
isolate(), Builtins::kConstructWithSpread_WithFeedback);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
+ kTheSpread + kMaybeFeedbackVector);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -920,6 +972,10 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
// on the stack here.
Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
Node* receiver = jsgraph()->UndefinedConstant();
+#ifdef V8_REVERSE_JSARGS
+ Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
+ Node* spread = node->RemoveInput(n.LastArgumentIndex());
+#endif
// Register argument inputs are followed by stack argument inputs (such as
// feedback_vector). Both are listed in ascending order. Note that
@@ -929,15 +985,26 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 3, stub_arity);
node->InsertInput(zone(), 4, slot);
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone(), 5, spread);
+ node->InsertInput(zone(), 6, feedback_vector);
+ node->InsertInput(zone(), 7, receiver);
+ // After: {code, target, new_target, arity, slot, spread, vector, receiver,
+ // ...args}.
+#else
node->InsertInput(zone(), 5, receiver);
-
// After: {code, target, new_target, arity, slot, receiver, ...args, spread,
// vector}.
+#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
const int stack_argument_count = arg_count + kReceiver - kTheSpread;
Callable callable = CodeFactory::ConstructWithSpread(isolate());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -1091,6 +1158,11 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
arg_count - kTheSpread + kReceiver + kMaybeFeedbackVector;
Callable callable = Builtins::CallableFor(
isolate(), Builtins::kCallWithSpread_WithFeedback);
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(),
+ kMaybeFeedbackVector);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
@@ -1107,22 +1179,29 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
-
+#ifdef V8_REVERSE_JSARGS
+ Node* feedback_vector = node->RemoveInput(n.FeedbackVectorIndex());
+#endif
Node* spread = node->RemoveInput(n.LastArgumentIndex());
-
- // Now: {target, receiver, ...args, vector}.
-
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
node->InsertInput(zone(), 3, spread);
node->InsertInput(zone(), 4, slot);
-
+#ifdef V8_REVERSE_JSARGS
+ node->InsertInput(zone(), 5, feedback_vector);
+ // After: {code, target, arity, spread, slot, vector, receiver, ...args}.
+#else
// After: {code, target, arity, spread, slot, receiver, ...args, vector}.
+#endif
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
const int stack_argument_count = arg_count - kTheSpread + kReceiver;
Callable callable = CodeFactory::CallWithSpread(isolate());
+ // If this fails, we might need to update the parameter reordering code
+ // to ensure that the additional arguments passed via stack are pushed
+ // between top of stack and JS arguments.
+ DCHECK_EQ(callable.descriptor().GetStackParameterCount(), 0);
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 6b8d9761ff..120f8ee21d 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -129,6 +129,9 @@ DEFINE_GETTER(BooleanMapConstant, HeapConstant(factory()->boolean_map()))
DEFINE_GETTER(ToNumberBuiltinConstant,
HeapConstant(BUILTIN_CODE(isolate(), ToNumber)))
+DEFINE_GETTER(PlainPrimitiveToNumberBuiltinConstant,
+ HeapConstant(BUILTIN_CODE(isolate(), PlainPrimitiveToNumber)))
+
DEFINE_GETTER(EmptyFixedArrayConstant,
HeapConstant(factory()->empty_fixed_array()))
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index b055f399df..a17b615b3b 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -85,6 +85,7 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
V(BigIntMapConstant) \
V(BooleanMapConstant) \
V(ToNumberBuiltinConstant) \
+ V(PlainPrimitiveToNumberBuiltinConstant) \
V(EmptyFixedArrayConstant) \
V(EmptyStringConstant) \
V(FixedArrayMapConstant) \
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index 123297bc73..be05929bbc 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -34,7 +34,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/template-objects-inl.h"
#include "src/objects/templates.h"
-#include "src/utils/boxed-float.h"
#include "src/utils/utils.h"
namespace v8 {
@@ -45,7 +44,10 @@ namespace compiler {
#define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x)
#define FORWARD_DECL(Name) class Name##Data;
-HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
+// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
+// removed.
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL
// There are three kinds of ObjectData values.
@@ -71,15 +73,19 @@ enum ObjectDataKind {
kSmi,
kSerializedHeapObject,
kUnserializedHeapObject,
+ kNeverSerializedHeapObject,
kUnserializedReadOnlyHeapObject
};
-class AllowHandleAllocationIf {
+class AllowHandleAllocationIfNeeded {
public:
- explicit AllowHandleAllocationIf(ObjectDataKind kind,
- JSHeapBroker::BrokerMode mode) {
+ explicit AllowHandleAllocationIfNeeded(ObjectDataKind kind,
+ JSHeapBroker::BrokerMode mode,
+ bool direct_heap_access = false) {
DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
- kind == kUnserializedReadOnlyHeapObject);
+ kind == kUnserializedReadOnlyHeapObject ||
+ kind == kNeverSerializedHeapObject ||
+ (direct_heap_access && kind == kSerializedHeapObject));
if (kind == kUnserializedHeapObject) maybe_allow_handle_.emplace();
}
@@ -87,31 +93,33 @@ class AllowHandleAllocationIf {
base::Optional<AllowHandleAllocation> maybe_allow_handle_;
};
-class AllowHandleDereferenceIf {
+class AllowHandleDereferenceIfNeeded {
public:
- explicit AllowHandleDereferenceIf(ObjectDataKind kind,
- JSHeapBroker::BrokerMode mode) {
+ explicit AllowHandleDereferenceIfNeeded(ObjectDataKind kind,
+ JSHeapBroker::BrokerMode mode,
+ bool direct_heap_access = false)
+ : AllowHandleDereferenceIfNeeded(kind) {
DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
- kind == kUnserializedReadOnlyHeapObject);
- if (kind == kUnserializedHeapObject ||
- kind == kUnserializedReadOnlyHeapObject)
- maybe_allow_handle_.emplace();
+ kind == kUnserializedReadOnlyHeapObject ||
+ kind == kNeverSerializedHeapObject ||
+ (direct_heap_access && kind == kSerializedHeapObject));
}
- explicit AllowHandleDereferenceIf(ObjectDataKind kind) {
+ explicit AllowHandleDereferenceIfNeeded(ObjectDataKind kind) {
if (kind == kUnserializedHeapObject ||
- kind == kUnserializedReadOnlyHeapObject)
+ kind == kUnserializedReadOnlyHeapObject) {
maybe_allow_handle_.emplace();
+ }
}
private:
base::Optional<AllowHandleDereference> maybe_allow_handle_;
};
-class AllowHeapAllocationIf {
+class AllowHeapAllocationIfNeeded {
public:
- explicit AllowHeapAllocationIf(ObjectDataKind kind,
- JSHeapBroker::BrokerMode mode) {
+ explicit AllowHeapAllocationIfNeeded(ObjectDataKind kind,
+ JSHeapBroker::BrokerMode mode) {
DCHECK_IMPLIES(mode == JSHeapBroker::BrokerMode::kSerialized,
kind == kUnserializedReadOnlyHeapObject);
if (kind == kUnserializedHeapObject) maybe_allow_handle_.emplace();
@@ -154,20 +162,29 @@ class ObjectData : public ZoneObject {
broker->mode() == JSHeapBroker::kSerializing,
broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
- IsReadOnlyHeapObject(*object));
+ (kind == kUnserializedReadOnlyHeapObject &&
+ IsReadOnlyHeapObject(*object)) ||
+ kind == kNeverSerializedHeapObject);
}
-#define DECLARE_IS_AND_AS(Name) \
- bool Is##Name() const; \
- Name##Data* As##Name();
- HEAP_BROKER_OBJECT_LIST(DECLARE_IS_AND_AS)
-#undef DECLARE_IS_AND_AS
+#define DECLARE_IS(Name) bool Is##Name() const;
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
+#undef DECLARE_IS
+
+#define DECLARE_AS(Name) Name##Data* As##Name();
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
+ // TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
+ // removed.
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
+#undef DECLARE_AS
Handle<Object> object() const { return object_; }
ObjectDataKind kind() const { return kind_; }
bool is_smi() const { return kind_ == kSmi; }
bool should_access_heap() const {
return kind_ == kUnserializedHeapObject ||
+ kind_ == kNeverSerializedHeapObject ||
kind_ == kUnserializedReadOnlyHeapObject;
}
@@ -216,7 +233,7 @@ class PropertyCellData : public HeapObjectData {
// TODO(mslekova): Once we have real-world usage data, we might want to
// reimplement this as sorted vector instead, to reduce the memory overhead.
-typedef ZoneMap<MapData*, HolderLookupResult> KnownReceiversMap;
+typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap;
class FunctionTemplateInfoData : public HeapObjectData {
public:
@@ -228,7 +245,7 @@ class FunctionTemplateInfoData : public HeapObjectData {
bool has_call_code() const { return has_call_code_; }
void SerializeCallCode(JSHeapBroker* broker);
- CallHandlerInfoData* call_code() const { return call_code_; }
+ ObjectData* call_code() const { return call_code_; }
Address c_function() const { return c_function_; }
const CFunctionInfo* c_signature() const { return c_signature_; }
KnownReceiversMap& known_receivers() { return known_receivers_; }
@@ -238,7 +255,7 @@ class FunctionTemplateInfoData : public HeapObjectData {
bool accept_any_receiver_ = false;
bool has_call_code_ = false;
- CallHandlerInfoData* call_code_ = nullptr;
+ ObjectData* call_code_ = nullptr;
const Address c_function_;
const CFunctionInfo* const c_signature_;
KnownReceiversMap known_receivers_;
@@ -310,9 +327,10 @@ void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
TraceScope tracer(broker, this,
"FunctionTemplateInfoData::SerializeCallCode");
auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
- call_code_ = broker->GetOrCreateData(function_template_info->call_code())
- ->AsCallHandlerInfo();
- call_code_->Serialize(broker);
+ call_code_ = broker->GetOrCreateData(function_template_info->call_code());
+ if (!call_code_->should_access_heap()) {
+ call_code_->AsCallHandlerInfo()->Serialize(broker);
+ }
}
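The change above is one instance of a pattern repeated throughout this file: fields are stored as plain ObjectData*, and the typed serialization step only runs when the data is not read directly from the heap. A standalone sketch of that guard, with stand-in types (ObjectData here is not the V8 class):

// Sketch of "store ObjectData*, serialize only if !should_access_heap()".
#include <iostream>

struct ObjectData {
  bool direct_heap_access = false;
  bool serialized = false;
  bool should_access_heap() const { return direct_heap_access; }
  void Serialize() { serialized = true; }  // stands in for AsXxx()->Serialize(broker)
};

struct Holder {
  ObjectData* call_code_ = nullptr;
  void SerializeCallCode(ObjectData* data) {
    call_code_ = data;
    if (!call_code_->should_access_heap()) {
      call_code_->Serialize();
    }
  }
};

int main() {
  ObjectData direct{true}, copied{false};
  Holder h1, h2;
  h1.SerializeCallCode(&direct);   // read from heap later, no copy made
  h2.SerializeCallCode(&copied);   // serialized into the broker now
  std::cout << direct.serialized << ' ' << copied.serialized << '\n';  // 0 1
}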
void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
@@ -451,7 +469,8 @@ base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
if (it.state() == LookupIterator::DATA &&
(!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return ObjectRef(broker, it.GetDataValue());
+ return ObjectRef(broker,
+ broker->CanonicalPersistentHandle(it.GetDataValue()));
}
return base::nullopt;
}
@@ -519,7 +538,7 @@ class JSTypedArrayData : public JSObjectData {
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
- HeapObjectData* buffer() const { return buffer_; }
+ ObjectData* buffer() const { return buffer_; }
private:
bool const is_on_heap_;
@@ -527,7 +546,7 @@ class JSTypedArrayData : public JSObjectData {
void* const data_ptr_;
bool serialized_ = false;
- HeapObjectData* buffer_ = nullptr;
+ ObjectData* buffer_ = nullptr;
};
JSTypedArrayData::JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -546,7 +565,7 @@ void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
if (!is_on_heap()) {
DCHECK_NULL(buffer_);
- buffer_ = broker->GetOrCreateData(typed_array->buffer())->AsHeapObject();
+ buffer_ = broker->GetOrCreateData(typed_array->buffer());
}
}
@@ -555,7 +574,9 @@ class ArrayBoilerplateDescriptionData : public HeapObjectData {
ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
Handle<ArrayBoilerplateDescription> object)
: HeapObjectData(broker, storage, object),
- constants_elements_length_(object->constant_elements().length()) {}
+ constants_elements_length_(object->constant_elements().length()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
int constants_elements_length() const { return constants_elements_length_; }
@@ -567,7 +588,9 @@ class ObjectBoilerplateDescriptionData : public HeapObjectData {
public:
ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
Handle<ObjectBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object), size_(object->size()) {}
+ : HeapObjectData(broker, storage, object), size_(object->size()) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
int size() const { return size_; }
@@ -581,11 +604,9 @@ class JSDataViewData : public JSObjectData {
Handle<JSDataView> object);
size_t byte_length() const { return byte_length_; }
- size_t byte_offset() const { return byte_offset_; }
private:
size_t const byte_length_;
- size_t const byte_offset_;
};
class JSBoundFunctionData : public JSObjectData {
@@ -624,13 +645,13 @@ class JSFunctionData : public JSObjectData {
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
- ContextData* context() const { return context_; }
- NativeContextData* native_context() const { return native_context_; }
+ ObjectData* context() const { return context_; }
+ ObjectData* native_context() const { return native_context_; }
ObjectData* initial_map() const { return initial_map_; }
ObjectData* prototype() const { return prototype_; }
- SharedFunctionInfoData* shared() const { return shared_; }
- FeedbackVectorData* feedback_vector() const { return feedback_vector_; }
- CodeData* code() const { return code_; }
+ ObjectData* shared() const { return shared_; }
+ ObjectData* feedback_vector() const { return feedback_vector_; }
+ ObjectData* code() const { return code_; }
int initial_map_instance_size_with_min_slack() const {
CHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -645,13 +666,13 @@ class JSFunctionData : public JSObjectData {
bool serialized_ = false;
- ContextData* context_ = nullptr;
- NativeContextData* native_context_ = nullptr;
+ ObjectData* context_ = nullptr;
+ ObjectData* native_context_ = nullptr;
ObjectData* initial_map_ = nullptr;
ObjectData* prototype_ = nullptr;
- SharedFunctionInfoData* shared_ = nullptr;
- FeedbackVectorData* feedback_vector_ = nullptr;
- CodeData* code_ = nullptr;
+ ObjectData* shared_ = nullptr;
+ ObjectData* feedback_vector_ = nullptr;
+ ObjectData* code_ = nullptr;
int initial_map_instance_size_with_min_slack_;
};
@@ -683,7 +704,8 @@ class HeapNumberData : public HeapObjectData {
public:
HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
Handle<HeapNumber> object)
- : HeapObjectData(broker, storage, object), value_(object->value()) {}
+ : HeapObjectData(broker, storage, object), value_(object->value()) {
+ }
double value() const { return value_; }
@@ -696,10 +718,8 @@ class ContextData : public HeapObjectData {
ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object);
- // {previous} will return the closest valid context possible to desired
- // {depth}, decrementing {depth} for each previous link successfully followed.
- ContextData* previous(
- JSHeapBroker* broker, size_t* depth,
+ ObjectData* previous(
+ JSHeapBroker* broker,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
// Returns nullptr if the slot index isn't valid or wasn't serialized,
@@ -710,32 +730,22 @@ class ContextData : public HeapObjectData {
private:
ZoneMap<int, ObjectData*> slots_;
- ContextData* previous_ = nullptr;
+ ObjectData* previous_ = nullptr;
};
ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
Handle<Context> object)
: HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
-ContextData* ContextData::previous(JSHeapBroker* broker, size_t* depth,
- SerializationPolicy policy) {
- if (*depth == 0) return this;
-
+ObjectData* ContextData::previous(JSHeapBroker* broker,
+ SerializationPolicy policy) {
if (policy == SerializationPolicy::kSerializeIfNeeded &&
previous_ == nullptr) {
TraceScope tracer(broker, this, "ContextData::previous");
Handle<Context> context = Handle<Context>::cast(object());
- Object prev = context->unchecked_previous();
- if (prev.IsContext()) {
- previous_ = broker->GetOrCreateData(prev)->AsContext();
- }
- }
-
- if (previous_ != nullptr) {
- *depth = *depth - 1;
- return previous_->previous(broker, depth, policy);
+ previous_ = broker->GetOrCreateData(context->unchecked_previous());
}
- return this;
+ return previous_;
}
ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
@@ -763,16 +773,16 @@ ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
class NativeContextData : public ContextData {
public:
#define DECL_ACCESSOR(type, name) \
- type##Data* name() const { return name##_; }
+ ObjectData* name() const { return name##_; }
BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
#undef DECL_ACCESSOR
- const ZoneVector<MapData*>& function_maps() const {
+ const ZoneVector<ObjectData*>& function_maps() const {
CHECK(serialized_);
return function_maps_;
}
- ScopeInfoData* scope_info() const {
+ ObjectData* scope_info() const {
CHECK(serialized_);
return scope_info_;
}
@@ -783,11 +793,11 @@ class NativeContextData : public ContextData {
private:
bool serialized_ = false;
-#define DECL_MEMBER(type, name) type##Data* name##_ = nullptr;
+#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
- ZoneVector<MapData*> function_maps_;
- ScopeInfoData* scope_info_ = nullptr;
+ ZoneVector<ObjectData*> function_maps_;
+ ObjectData* scope_info_ = nullptr;
};
class NameData : public HeapObjectData {
@@ -806,7 +816,7 @@ class StringData : public NameData {
bool is_external_string() const { return is_external_string_; }
bool is_seq_string() const { return is_seq_string_; }
- StringData* GetCharAsString(
+ ObjectData* GetCharAsString(
JSHeapBroker* broker, uint32_t index,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
@@ -820,7 +830,7 @@ class StringData : public NameData {
// Known individual characters as strings, corresponding to the semantics of
// element access (s[i]). The first pair component is always less than
// {length_}. The second component is never nullptr.
- ZoneVector<std::pair<uint32_t, StringData*>> chars_as_strings_;
+ ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
static constexpr int kMaxLengthForDoubleConversion = 23;
};
@@ -859,7 +869,7 @@ class InternalizedStringData : public StringData {
uint32_t array_index_;
};
-StringData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
+ObjectData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
SerializationPolicy policy) {
if (index >= static_cast<uint32_t>(length())) return nullptr;
@@ -874,8 +884,7 @@ StringData* StringData::GetCharAsString(JSHeapBroker* broker, uint32_t index,
base::Optional<ObjectRef> element =
GetOwnElementFromHeap(broker, object(), index, true);
- StringData* result =
- element.has_value() ? element->data()->AsString() : nullptr;
+ ObjectData* result = element.has_value() ? element->data() : nullptr;
chars_as_strings_.push_back({index, result});
return result;
}
@@ -987,7 +996,7 @@ class AllocationSiteData : public HeapObjectData {
AllocationType GetAllocationType() const { return GetAllocationType_; }
ObjectData* nested_site() const { return nested_site_; }
bool IsFastLiteral() const { return IsFastLiteral_; }
- JSObjectData* boilerplate() const { return boilerplate_; }
+ ObjectData* boilerplate() const { return boilerplate_; }
// These are only valid if PointsToLiteral is false.
ElementsKind GetElementsKind() const { return GetElementsKind_; }
@@ -998,7 +1007,7 @@ class AllocationSiteData : public HeapObjectData {
AllocationType const GetAllocationType_;
ObjectData* nested_site_ = nullptr;
bool IsFastLiteral_ = false;
- JSObjectData* boilerplate_ = nullptr;
+ ObjectData* boilerplate_ = nullptr;
ElementsKind GetElementsKind_ = NO_ELEMENTS;
bool CanInlineCall_ = false;
bool serialized_boilerplate_ = false;
@@ -1008,7 +1017,8 @@ class BigIntData : public HeapObjectData {
public:
BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object)
: HeapObjectData(broker, storage, object),
- as_uint64_(object->AsUint64(nullptr)) {}
+ as_uint64_(object->AsUint64(nullptr)) {
+ }
uint64_t AsUint64() const { return as_uint64_; }
@@ -1025,11 +1035,11 @@ class ScriptContextTableData : public HeapObjectData {
};
struct PropertyDescriptor {
- NameData* key = nullptr;
+ ObjectData* key = nullptr;
ObjectData* value = nullptr;
PropertyDetails details = PropertyDetails::Empty();
FieldIndex field_index;
- MapData* field_owner = nullptr;
+ ObjectData* field_owner = nullptr;
ObjectData* field_type = nullptr;
bool is_unboxed_double_field = false;
};
@@ -1069,7 +1079,7 @@ class MapData : public HeapObjectData {
// Extra information.
void SerializeElementsKindGeneralizations(JSHeapBroker* broker);
- const ZoneVector<MapData*>& elements_kind_generalizations() const {
+ const ZoneVector<ObjectData*>& elements_kind_generalizations() const {
CHECK(serialized_elements_kind_generalizations_);
return elements_kind_generalizations_;
}
@@ -1080,12 +1090,16 @@ class MapData : public HeapObjectData {
InternalIndex descriptor_index);
void SerializeOwnDescriptors(JSHeapBroker* broker);
ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
+ // TODO(neis): This code needs to be changed to allow for ObjectData* instance
+ // descriptors. However, this is likely to require a non-trivial refactoring
+ // of how maps are serialized because actual instance descriptors don't
+ // contain information about owner maps.
DescriptorArrayData* instance_descriptors() const {
return instance_descriptors_;
}
void SerializeRootMap(JSHeapBroker* broker);
- MapData* FindRootMap() const;
+ ObjectData* FindRootMap() const;
void SerializeConstructor(JSHeapBroker* broker);
ObjectData* GetConstructor() const {
@@ -1094,7 +1108,7 @@ class MapData : public HeapObjectData {
}
void SerializeBackPointer(JSHeapBroker* broker);
- HeapObjectData* GetBackPointer() const {
+ ObjectData* GetBackPointer() const {
CHECK(serialized_backpointer_);
return backpointer_;
}
@@ -1128,7 +1142,7 @@ class MapData : public HeapObjectData {
bool const is_abandoned_prototype_map_;
bool serialized_elements_kind_generalizations_ = false;
- ZoneVector<MapData*> elements_kind_generalizations_;
+ ZoneVector<ObjectData*> elements_kind_generalizations_;
bool serialized_own_descriptors_ = false;
DescriptorArrayData* instance_descriptors_ = nullptr;
@@ -1137,13 +1151,13 @@ class MapData : public HeapObjectData {
ObjectData* constructor_ = nullptr;
bool serialized_backpointer_ = false;
- HeapObjectData* backpointer_ = nullptr;
+ ObjectData* backpointer_ = nullptr;
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
bool serialized_root_map_ = false;
- MapData* root_map_ = nullptr;
+ ObjectData* root_map_ = nullptr;
bool serialized_for_element_load_ = false;
@@ -1152,7 +1166,9 @@ class MapData : public HeapObjectData {
AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
Handle<AccessorInfo> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+}
AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
ObjectData** storage,
@@ -1178,12 +1194,14 @@ void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
CHECK(IsFastLiteral_);
DCHECK_NULL(boilerplate_);
- boilerplate_ = broker->GetOrCreateData(site->boilerplate())->AsJSObject();
- boilerplate_->SerializeAsBoilerplate(broker);
+ boilerplate_ = broker->GetOrCreateData(site->boilerplate());
+ if (!boilerplate_->should_access_heap()) {
+ boilerplate_->AsJSObject()->SerializeAsBoilerplate(broker);
+ }
DCHECK_NULL(nested_site_);
nested_site_ = broker->GetOrCreateData(site->nested_site());
- if (nested_site_->IsAllocationSite()) {
+ if (nested_site_->IsAllocationSite() && !nested_site_->should_access_heap()) {
nested_site_->AsAllocationSite()->SerializeBoilerplate(broker);
}
}
@@ -1198,13 +1216,13 @@ HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
// meta map (whose map is itself), this member has not yet been
// initialized.
map_(broker->GetOrCreateData(object->map())) {
- CHECK(broker->SerializingAllowed());
+ CHECK_EQ(broker->mode(), JSHeapBroker::kSerializing);
}
InstanceType HeapObjectData::GetMapInstanceType() const {
ObjectData* map_data = map();
if (map_data->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(kind());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(kind());
return Handle<Map>::cast(map_data->object())->instance_type();
}
return map_data->AsMap()->instance_type();
@@ -1290,17 +1308,15 @@ void JSFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(feedback_vector_);
DCHECK_NULL(code_);
- context_ = broker->GetOrCreateData(function->context())->AsContext();
- native_context_ =
- broker->GetOrCreateData(function->native_context())->AsNativeContext();
- shared_ = broker->GetOrCreateData(function->shared())->AsSharedFunctionInfo();
+ context_ = broker->GetOrCreateData(function->context());
+ native_context_ = broker->GetOrCreateData(function->native_context());
+ shared_ = broker->GetOrCreateData(function->shared());
feedback_vector_ = has_feedback_vector()
? broker->GetOrCreateData(function->feedback_vector())
- ->AsFeedbackVector()
: nullptr;
- code_ = broker->GetOrCreateData(function->code())->AsCode();
+ code_ = broker->GetOrCreateData(function->code());
initial_map_ = has_initial_map()
- ? broker->GetOrCreateData(function->initial_map())->AsMap()
+ ? broker->GetOrCreateData(function->initial_map())
: nullptr;
prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
: nullptr;
@@ -1336,8 +1352,7 @@ void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
Handle<Map> target =
Map::AsElementsKind(broker->isolate(), self.object(), to_kind);
- elements_kind_generalizations_.push_back(
- broker->GetOrCreateData(target)->AsMap());
+ elements_kind_generalizations_.push_back(broker->GetOrCreateData(target));
}
}
}
@@ -1359,16 +1374,16 @@ class FeedbackCellData : public HeapObjectData {
FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
Handle<FeedbackCell> object);
- HeapObjectData* value() const { return value_; }
+ ObjectData* value() const { return value_; }
private:
- HeapObjectData* const value_;
+ ObjectData* const value_;
};
FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
Handle<FeedbackCell> object)
: HeapObjectData(broker, storage, object),
- value_(broker->GetOrCreateData(object->value())->AsHeapObject()) {}
+ value_(broker->GetOrCreateData(object->value())) {}
class FeedbackVectorData : public HeapObjectData {
public:
@@ -1377,21 +1392,20 @@ class FeedbackVectorData : public HeapObjectData {
double invocation_count() const { return invocation_count_; }
- SharedFunctionInfoData* shared_function_info() {
+ ObjectData* shared_function_info() {
CHECK(serialized_);
return shared_function_info_;
}
void Serialize(JSHeapBroker* broker);
bool serialized() const { return serialized_; }
- FeedbackCellData* GetClosureFeedbackCell(JSHeapBroker* broker,
- int index) const;
+ ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
private:
double const invocation_count_;
bool serialized_ = false;
- SharedFunctionInfoData* shared_function_info_;
+ ObjectData* shared_function_info_;
ZoneVector<ObjectData*> closure_feedback_cell_array_;
};
@@ -1402,8 +1416,8 @@ FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
invocation_count_(object->invocation_count()),
closure_feedback_cell_array_(broker->zone()) {}
-FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell(
- JSHeapBroker* broker, int index) const {
+ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
+ int index) const {
CHECK_GE(index, 0);
size_t cell_array_size = closure_feedback_cell_array_.size();
@@ -1414,7 +1428,7 @@ FeedbackCellData* FeedbackVectorData::GetClosureFeedbackCell(
return nullptr;
}
CHECK_LT(index, cell_array_size);
- return closure_feedback_cell_array_[index]->AsFeedbackCell();
+ return closure_feedback_cell_array_[index];
}
void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
@@ -1425,7 +1439,7 @@ void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
broker->isolate());
- shared_function_info_ = broker->GetOrCreateData(sfi)->AsSharedFunctionInfo();
+ shared_function_info_ = broker->GetOrCreateData(sfi);
DCHECK(closure_feedback_cell_array_.empty());
int length = vector->closure_feedback_cell_array().length();
closure_feedback_cell_array_.reserve(length);
@@ -1467,8 +1481,7 @@ class FixedArrayData : public FixedArrayBaseData {
JSDataViewData::JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSDataView> object)
: JSObjectData(broker, storage, object),
- byte_length_(object->byte_length()),
- byte_offset_(object->byte_offset()) {}
+ byte_length_(object->byte_length()) {}
JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
ObjectData** storage,
@@ -1485,10 +1498,12 @@ void JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
DCHECK_NULL(bound_target_function_);
bound_target_function_ =
broker->GetOrCreateData(function->bound_target_function());
- if (bound_target_function_->IsJSBoundFunction()) {
- bound_target_function_->AsJSBoundFunction()->Serialize(broker);
- } else if (bound_target_function_->IsJSFunction()) {
- bound_target_function_->AsJSFunction()->Serialize(broker);
+ if (!bound_target_function_->should_access_heap()) {
+ if (bound_target_function_->IsJSBoundFunction()) {
+ bound_target_function_->AsJSBoundFunction()->Serialize(broker);
+ } else if (bound_target_function_->IsJSFunction()) {
+ bound_target_function_->AsJSFunction()->Serialize(broker);
+ }
}
DCHECK_NULL(bound_arguments_);
@@ -1547,7 +1562,8 @@ class FixedDoubleArrayData : public FixedArrayBaseData {
FixedDoubleArrayData::FixedDoubleArrayData(JSHeapBroker* broker,
ObjectData** storage,
Handle<FixedDoubleArray> object)
- : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
+ : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {
+}
void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
if (serialized_contents_) return;
@@ -1567,6 +1583,12 @@ void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
class BytecodeArrayData : public FixedArrayBaseData {
public:
+ int register_count() const { return register_count_; }
+ int parameter_count() const { return parameter_count_; }
+ interpreter::Register incoming_new_target_or_generator_register() const {
+ return incoming_new_target_or_generator_register_;
+ }
+
Handle<Object> GetConstantAtIndex(int index, Isolate* isolate) const {
return constant_pool_[index]->object();
}
@@ -1599,9 +1621,17 @@ class BytecodeArrayData : public FixedArrayBaseData {
BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<BytecodeArray> object)
: FixedArrayBaseData(broker, storage, object),
+ register_count_(object->register_count()),
+ parameter_count_(object->parameter_count()),
+ incoming_new_target_or_generator_register_(
+ object->incoming_new_target_or_generator_register()),
constant_pool_(broker->zone()) {}
private:
+ int const register_count_;
+ int const parameter_count_;
+ interpreter::Register const incoming_new_target_or_generator_register_;
+
bool is_serialized_for_compilation_ = false;
ZoneVector<ObjectData*> constant_pool_;
};
@@ -1671,7 +1701,7 @@ class ScopeInfoData : public HeapObjectData {
bool has_outer_scope_info() const { return has_outer_scope_info_; }
int flags() const { return flags_; }
- ScopeInfoData* outer_scope_info() const { return outer_scope_info_; }
+ ObjectData* outer_scope_info() const { return outer_scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
private:
@@ -1680,7 +1710,7 @@ class ScopeInfoData : public HeapObjectData {
int const flags_;
// Only serialized via SerializeScopeInfoChain.
- ScopeInfoData* outer_scope_info_;
+ ObjectData* outer_scope_info_;
};
ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
@@ -1694,11 +1724,11 @@ ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
if (outer_scope_info_) return;
if (!has_outer_scope_info_) return;
- outer_scope_info_ =
- broker
- ->GetOrCreateData(Handle<ScopeInfo>::cast(object())->OuterScopeInfo())
- ->AsScopeInfo();
- outer_scope_info_->SerializeScopeInfoChain(broker);
+ outer_scope_info_ = broker->GetOrCreateData(
+ Handle<ScopeInfo>::cast(object())->OuterScopeInfo());
+ if (!outer_scope_info_->should_access_heap()) {
+ outer_scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
+ }
}
class SharedFunctionInfoData : public HeapObjectData {
@@ -1708,21 +1738,19 @@ class SharedFunctionInfoData : public HeapObjectData {
int builtin_id() const { return builtin_id_; }
int context_header_size() const { return context_header_size_; }
- BytecodeArrayData* GetBytecodeArray() const { return GetBytecodeArray_; }
+ ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
- ScopeInfoData* scope_info() const { return scope_info_; }
+ ObjectData* scope_info() const { return scope_info_; }
void SerializeScopeInfoChain(JSHeapBroker* broker);
- FunctionTemplateInfoData* function_template_info() const {
- return function_template_info_;
- }
- JSArrayData* GetTemplateObject(FeedbackSlot slot) const {
+ ObjectData* function_template_info() const { return function_template_info_; }
+ ObjectData* GetTemplateObject(FeedbackSlot slot) const {
auto lookup_it = template_objects_.find(slot.ToInt());
if (lookup_it != template_objects_.cend()) {
return lookup_it->second;
}
return nullptr;
}
- void SetTemplateObject(FeedbackSlot slot, JSArrayData* object) {
+ void SetTemplateObject(FeedbackSlot slot, ObjectData* object) {
CHECK(
template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
}
@@ -1735,13 +1763,13 @@ class SharedFunctionInfoData : public HeapObjectData {
private:
int const builtin_id_;
int context_header_size_;
- BytecodeArrayData* const GetBytecodeArray_;
+ ObjectData* const GetBytecodeArray_;
#define DECL_MEMBER(type, name) type const name##_;
BROKER_SFI_FIELDS(DECL_MEMBER)
#undef DECL_MEMBER
- FunctionTemplateInfoData* function_template_info_;
- ZoneMap<int, JSArrayData*> template_objects_;
- ScopeInfoData* scope_info_;
+ ObjectData* function_template_info_;
+ ZoneMap<int, ObjectData*> template_objects_;
+ ObjectData* scope_info_;
};
SharedFunctionInfoData::SharedFunctionInfoData(
@@ -1754,7 +1782,6 @@ SharedFunctionInfoData::SharedFunctionInfoData(
GetBytecodeArray_(
object->HasBytecodeArray()
? broker->GetOrCreateData(object->GetBytecodeArray())
- ->AsBytecodeArray()
: nullptr)
#define INIT_MEMBER(type, name) , name##_(object->name())
BROKER_SFI_FIELDS(INIT_MEMBER)
@@ -1770,23 +1797,17 @@ SharedFunctionInfoData::SharedFunctionInfoData(
void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
JSHeapBroker* broker) {
if (function_template_info_) return;
-
- function_template_info_ =
- broker
- ->GetOrCreateData(handle(
- Handle<SharedFunctionInfo>::cast(object())->function_data(),
- broker->isolate()))
- ->AsFunctionTemplateInfo();
+ function_template_info_ = broker->GetOrCreateData(
+ Handle<SharedFunctionInfo>::cast(object())->function_data());
}
void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
if (scope_info_) return;
- scope_info_ =
- broker
- ->GetOrCreateData(
- Handle<SharedFunctionInfo>::cast(object())->scope_info())
- ->AsScopeInfo();
- scope_info_->SerializeScopeInfoChain(broker);
+ scope_info_ = broker->GetOrCreateData(
+ Handle<SharedFunctionInfo>::cast(object())->scope_info());
+ if (!scope_info_->should_access_heap()) {
+ scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
+ }
}
class SourceTextModuleData : public HeapObjectData {
@@ -1795,13 +1816,13 @@ class SourceTextModuleData : public HeapObjectData {
Handle<SourceTextModule> object);
void Serialize(JSHeapBroker* broker);
- CellData* GetCell(JSHeapBroker* broker, int cell_index) const;
+ ObjectData* GetCell(JSHeapBroker* broker, int cell_index) const;
ObjectData* GetImportMeta(JSHeapBroker* broker) const;
private:
bool serialized_ = false;
- ZoneVector<CellData*> imports_;
- ZoneVector<CellData*> exports_;
+ ZoneVector<ObjectData*> imports_;
+ ZoneVector<ObjectData*> exports_;
ObjectData* import_meta_;
};
@@ -1813,15 +1834,15 @@ SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
exports_(broker->zone()),
import_meta_(nullptr) {}
-CellData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
- int cell_index) const {
+ObjectData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
+ int cell_index) const {
if (!serialized_) {
DCHECK(imports_.empty());
TRACE_BROKER_MISSING(broker,
"module cell " << cell_index << " on " << this);
return nullptr;
}
- CellData* cell;
+ ObjectData* cell;
switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
case SourceTextModuleDescriptor::kImport:
cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
@@ -1856,7 +1877,7 @@ void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
int const imports_length = imports->length();
imports_.reserve(imports_length);
for (int i = 0; i < imports_length; ++i) {
- imports_.push_back(broker->GetOrCreateData(imports->get(i))->AsCell());
+ imports_.push_back(broker->GetOrCreateData(imports->get(i)));
}
TRACE(broker, "Copied " << imports_.size() << " imports");
@@ -1865,7 +1886,7 @@ void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
int const exports_length = exports->length();
exports_.reserve(exports_length);
for (int i = 0; i < exports_length; ++i) {
- exports_.push_back(broker->GetOrCreateData(exports->get(i))->AsCell());
+ exports_.push_back(broker->GetOrCreateData(exports->get(i)));
}
TRACE(broker, "Copied " << exports_.size() << " exports");
@@ -1876,35 +1897,20 @@ void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
class CellData : public HeapObjectData {
public:
- CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object);
-
- void Serialize(JSHeapBroker* broker);
- ObjectData* value() { return value_; }
-
- private:
- ObjectData* value_ = nullptr;
+ CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
};
-CellData::CellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Cell> object)
- : HeapObjectData(broker, storage, object) {}
-
-void CellData::Serialize(JSHeapBroker* broker) {
- if (value_ != nullptr) return;
-
- TraceScope tracer(broker, this, "CellData::Serialize");
- auto cell = Handle<Cell>::cast(object());
- value_ = broker->GetOrCreateData(cell->value());
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object);
bool IsDetached() const { return is_detached_; }
- PropertyCellData* GetPropertyCell(
- JSHeapBroker* broker, NameData* name,
+ ObjectData* GetPropertyCell(
+ JSHeapBroker* broker, ObjectData* name,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
private:
@@ -1914,7 +1920,7 @@ class JSGlobalObjectData : public JSObjectData {
// (1) are known to exist as property cells on the global object, or
// (2) are known not to (possibly they don't exist at all).
// In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<NameData*, PropertyCellData*>> properties_;
+ ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
};
JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
@@ -1951,8 +1957,9 @@ base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
}
} // namespace
-PropertyCellData* JSGlobalObjectData::GetPropertyCell(
- JSHeapBroker* broker, NameData* name, SerializationPolicy policy) {
+ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
+ ObjectData* name,
+ SerializationPolicy policy) {
CHECK_NOT_NULL(name);
for (auto const& p : properties_) {
if (p.first == name) return p.second;
@@ -1963,12 +1970,14 @@ PropertyCellData* JSGlobalObjectData::GetPropertyCell(
return nullptr;
}
- PropertyCellData* result = nullptr;
+ ObjectData* result = nullptr;
base::Optional<PropertyCellRef> cell =
GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
if (cell.has_value()) {
- cell->Serialize();
- result = cell->data()->AsPropertyCell();
+ result = cell->data();
+ if (!result->should_access_heap()) {
+ result->AsPropertyCell()->Serialize(broker);
+ }
}
properties_.push_back({name, result});
return result;
@@ -1978,7 +1987,9 @@ class TemplateObjectDescriptionData : public HeapObjectData {
public:
TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
Handle<TemplateObjectDescription> object)
- : HeapObjectData(broker, storage, object) {}
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!FLAG_turbo_direct_heap_access);
+ }
};
class CodeData : public HeapObjectData {
@@ -1993,23 +2004,43 @@ class CodeData : public HeapObjectData {
unsigned const inlined_bytecode_size_;
};
-#define DEFINE_IS_AND_AS(Name) \
+#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
- AllowHandleDereferenceIf allow_handle_dereference(kind()); \
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(kind()); \
return object()->Is##Name(); \
} \
if (is_smi()) return false; \
InstanceType instance_type = \
static_cast<const HeapObjectData*>(this)->GetMapInstanceType(); \
return InstanceTypeChecker::Is##Name(instance_type); \
- } \
- Name##Data* ObjectData::As##Name() { \
- CHECK(Is##Name()); \
- return static_cast<Name##Data*>(this); \
}
-HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
-#undef DEFINE_IS_AND_AS
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
+#undef DEFINE_IS
+
+#define DEFINE_AS(Name) \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK(Is##Name()); \
+ CHECK_EQ(kind_, kSerializedHeapObject); \
+ return static_cast<Name##Data*>(this); \
+ }
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+#undef DEFINE_AS
+
+// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
+// removed.
+// This macro defines the Asxxx methods for NeverSerialized objects, which
+// should only be used with direct heap access off.
+#define DEFINE_AS(Name) \
+ Name##Data* ObjectData::As##Name() { \
+ DCHECK(!FLAG_turbo_direct_heap_access); \
+ CHECK(Is##Name()); \
+ CHECK_EQ(kind_, kSerializedHeapObject); \
+ return static_cast<Name##Data*>(this); \
+ }
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
+#undef DEFINE_AS
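A small compilable sketch of the Is/As split these macros generate: Is##Name is a type check, while As##Name additionally requires that the data was actually serialized before downcasting. The list, tag enum, and class names below are invented stand-ins, not the broker's real types.

// Illustrative Is/As dispatch over an X-macro object list; not V8 code.
#include <cassert>

#define TOY_OBJECT_LIST(V) \
  V(Map)                   \
  V(String)

enum Kind { kSerializedHeapObject, kNeverSerializedHeapObject };

enum TypeTag {
#define DEFINE_TAG(Name) kTag##Name,
  TOY_OBJECT_LIST(DEFINE_TAG)
#undef DEFINE_TAG
};

#define FORWARD_DECL(Name) struct Name##Data;
TOY_OBJECT_LIST(FORWARD_DECL)
#undef FORWARD_DECL

struct ObjectData {
  Kind kind;
  TypeTag tag;

#define DECLARE_IS(Name) \
  bool Is##Name() const { return tag == kTag##Name; }
  TOY_OBJECT_LIST(DECLARE_IS)
#undef DECLARE_IS

#define DECLARE_AS(Name) Name##Data* As##Name();
  TOY_OBJECT_LIST(DECLARE_AS)
#undef DECLARE_AS
};

#define DEFINE_DATA(Name) struct Name##Data : ObjectData {};
TOY_OBJECT_LIST(DEFINE_DATA)
#undef DEFINE_DATA

// As##Name downcasts only when the type check passes and the data was
// serialized, mirroring the CHECK_EQ(kind_, kSerializedHeapObject) above.
#define DEFINE_AS(Name)                    \
  Name##Data* ObjectData::As##Name() {     \
    assert(Is##Name());                    \
    assert(kind == kSerializedHeapObject); \
    return static_cast<Name##Data*>(this); \
  }
TOY_OBJECT_LIST(DEFINE_AS)
#undef DEFINE_AS

int main() {
  MapData m;
  m.kind = kSerializedHeapObject;
  m.tag = kTagMap;
  ObjectData* d = &m;
  assert(d->IsMap() && !d->IsString());
  assert(d->AsMap() == &m);
}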
const JSObjectField& JSObjectData::GetInobjectField(int property_index) const {
CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
@@ -2058,7 +2089,7 @@ void MapData::SerializeBackPointer(JSHeapBroker* broker) {
Handle<Map> map = Handle<Map>::cast(object());
DCHECK_NULL(backpointer_);
DCHECK(!map->IsContextMap());
- backpointer_ = broker->GetOrCreateData(map->GetBackPointer())->AsHeapObject();
+ backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
}
void MapData::SerializePrototype(JSHeapBroker* broker) {
@@ -2086,7 +2117,6 @@ void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
ObjectData* MapData::GetStrongValue(InternalIndex descriptor_index) const {
auto data = instance_descriptors_->contents().find(descriptor_index.as_int());
if (data == instance_descriptors_->contents().end()) return nullptr;
-
return data->second.value;
}
@@ -2111,28 +2141,26 @@ void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
CHECK_EQ(*descriptors, map->instance_descriptors());
PropertyDescriptor d;
- d.key =
- broker->GetOrCreateData(descriptors->GetKey(descriptor_index))->AsName();
+ d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
MaybeObject value = descriptors->GetValue(descriptor_index);
HeapObject obj;
if (value.GetHeapObjectIfStrong(&obj)) {
- d.value = broker->GetOrCreateData(handle(obj, broker->isolate()));
+ d.value = broker->GetOrCreateData(obj);
}
d.details = descriptors->GetDetails(descriptor_index);
if (d.details.location() == kField) {
d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index))
- ->AsMap();
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
d.field_type =
broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
d.is_unboxed_double_field = map->IsUnboxedDoubleField(d.field_index);
}
contents[descriptor_index.as_int()] = d;
- if (d.details.location() == kField) {
+ if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
// Recurse on the owner map.
- d.field_owner->SerializeOwnDescriptor(broker, descriptor_index);
+ d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
}
TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
@@ -2147,11 +2175,10 @@ void MapData::SerializeRootMap(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "MapData::SerializeRootMap");
Handle<Map> map = Handle<Map>::cast(object());
DCHECK_NULL(root_map_);
- root_map_ =
- broker->GetOrCreateData(map->FindRootMap(broker->isolate()))->AsMap();
+ root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
}
-MapData* MapData::FindRootMap() const { return root_map_; }
+ObjectData* MapData::FindRootMap() const { return root_map_; }
void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
int depth) {
@@ -2206,13 +2233,16 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
ObjectData* value_data = broker->GetOrCreateData(value);
- value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
- depth - 1);
+ if (!value_data->should_access_heap()) {
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
+ }
}
}
} else {
CHECK(boilerplate->HasDoubleElements());
CHECK_LE(elements_object->Size(), kMaxRegularHeapObjectSize);
+ DCHECK_EQ(elements_->kind(), ObjectDataKind::kSerializedHeapObject);
elements_->AsFixedDoubleArray()->SerializeContents(broker);
}
@@ -2255,7 +2285,7 @@ void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
value = isolate->factory()->uninitialized_value();
}
ObjectData* value_data = broker->GetOrCreateData(value);
- if (value->IsJSObject()) {
+ if (value_data->IsJSObject() && !value_data->should_access_heap()) {
value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
depth - 1);
}
@@ -2303,29 +2333,36 @@ Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
ContextRef ContextRef::previous(size_t* depth,
SerializationPolicy policy) const {
DCHECK_NOT_NULL(depth);
+
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Context current = *object();
while (*depth != 0 && current.unchecked_previous().IsContext()) {
current = Context::cast(current.unchecked_previous());
(*depth)--;
}
- return ContextRef(broker(), handle(current, broker()->isolate()));
+ return ContextRef(broker(), broker()->CanonicalPersistentHandle(current));
}
- ContextData* current = this->data()->AsContext();
- return ContextRef(broker(), current->previous(broker(), depth, policy));
+
+ if (*depth == 0) return *this;
+
+ ObjectData* previous_data = data()->AsContext()->previous(broker(), policy);
+ if (previous_data == nullptr || !previous_data->IsContext()) return *this;
+
+ *depth = *depth - 1;
+ return ContextRef(broker(), previous_data).previous(depth, policy);
}
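A standalone model of the depth-walking behaviour that the hunks above move out of ContextData::previous and into ContextRef::previous: follow previous links while decrementing *depth, and stop early at the closest valid context if a link is missing. The Ctx struct is a stand-in, not a V8 type.

// Sketch of context-chain walking with a depth out-parameter.
#include <cstddef>
#include <iostream>

struct Ctx {
  const Ctx* previous;
};

const Ctx& Previous(const Ctx& current, size_t* depth) {
  if (*depth == 0) return current;
  if (current.previous == nullptr) return current;  // closest valid context
  *depth -= 1;
  return Previous(*current.previous, depth);
}

int main() {
  Ctx a{nullptr}, b{&a}, c{&b};
  size_t depth = 5;  // ask for more links than the chain has
  const Ctx& result = Previous(c, &depth);
  // Reaches the chain head with two links consumed: prints "1 3".
  std::cout << (&result == &a) << ' ' << depth << '\n';
}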
base::Optional<ObjectRef> ContextRef::get(int index,
SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Handle<Object> value(object()->get(index), broker()->isolate());
return ObjectRef(broker(), value);
}
@@ -2520,8 +2557,6 @@ void JSHeapBroker::Retire() {
#endif // DEBUG
}
-bool JSHeapBroker::SerializingAllowed() const { return mode() == kSerializing; }
-
void JSHeapBroker::SetTargetNativeContextRef(
Handle<NativeContext> native_context) {
// The MapData constructor uses {target_native_context_}. This creates a
@@ -2632,37 +2667,29 @@ void JSHeapBroker::InitializeAndStartSerializing(
CollectArrayAndObjectPrototypes();
- // Serialize Cells
Factory* const f = isolate()->factory();
- GetOrCreateData(f->array_buffer_detaching_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->array_constructor_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->array_iterator_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->array_species_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->many_closures_cell())->AsFeedbackCell();
- GetOrCreateData(f->no_elements_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->promise_hook_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->promise_species_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->promise_then_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- GetOrCreateData(f->string_length_protector())
- ->AsPropertyCell()
- ->Serialize(this);
- // - CEntry stub
+ {
+ ObjectData* data;
+ data = GetOrCreateData(f->array_buffer_detaching_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_constructor_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_iterator_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_species_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->no_elements_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_hook_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_species_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_then_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->string_length_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ }
+ GetOrCreateData(f->many_closures_cell());
GetOrCreateData(
CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
@@ -2683,15 +2710,29 @@ ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
} else if (IsReadOnlyHeapObject(*object)) {
object_data = zone()->New<ObjectData>(this, data_storage, object,
kUnserializedReadOnlyHeapObject);
-#define CREATE_DATA_IF_MATCH(name) \
+// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
+// FLAG_turbo_direct_heap_access.
+#define CREATE_DATA_FOR_DIRECT_READ(name) \
+ } else if (object->Is##name()) { \
+ if (FLAG_turbo_direct_heap_access) { \
+ object_data = zone()->New<ObjectData>( \
+ this, data_storage, object, kNeverSerializedHeapObject); \
+ } else { \
+ CHECK_EQ(mode(), kSerializing); \
+ AllowHandleAllocation handle_allocation; \
+ object_data = zone()->New<name##Data>(this, data_storage, \
+ Handle<name>::cast(object)); \
+ }
+ HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
+#undef CREATE_DATA_FOR_DIRECT_READ
+#define CREATE_DATA_FOR_SERIALIZATION(name) \
} else if (object->Is##name()) { \
- CHECK(SerializingAllowed()); \
+ CHECK_EQ(mode(), kSerializing); \
AllowHandleAllocation handle_allocation; \
object_data = zone()->New<name##Data>(this, data_storage, \
Handle<name>::cast(object));
-
- HEAP_BROKER_OBJECT_LIST(CREATE_DATA_IF_MATCH)
-#undef CREATE_DATA_IF_MATCH
+ HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
+#undef CREATE_DATA_FOR_SERIALIZATION
} else {
UNREACHABLE();
}
@@ -2704,7 +2745,7 @@ ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object) {
// clang-format on
ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
- return GetOrCreateData(handle(object, isolate()));
+ return GetOrCreateData(CanonicalPersistentHandle(object));
}
#define DEFINE_IS_AND_AS(Name) \
@@ -2713,7 +2754,8 @@ ObjectData* JSHeapBroker::GetOrCreateData(Object object) {
DCHECK(Is##Name()); \
return Name##Ref(broker(), data()); \
}
-HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
bool ObjectRef::IsSmi() const { return data()->is_smi(); }
@@ -2726,13 +2768,12 @@ int ObjectRef::AsSmi() const {
base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
broker()->mode());
- AllowHeapAllocationIf allow_heap_allocation(data()->kind(),
- broker()->mode());
Handle<Map> instance_map;
if (Map::TryGetObjectCreateMap(broker()->isolate(), object())
.ToHandle(&instance_map)) {
@@ -2758,17 +2799,17 @@ INSTANCE_TYPE_CHECKERS(DEF_TESTER)
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHeapAllocationIf allow_heap_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return MapRef(broker(),
Map::AsElementsKind(broker()->isolate(), object(), kind));
}
if (kind == elements_kind()) return *this;
- const ZoneVector<MapData*>& elements_kind_generalizations =
+ const ZoneVector<ObjectData*>& elements_kind_generalizations =
data()->AsMap()->elements_kind_generalizations();
for (auto data : elements_kind_generalizations) {
MapRef map(broker(), data);
@@ -2778,14 +2819,14 @@ base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
}
void MapRef::SerializeForElementLoad() {
+ if (data()->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- if (data()->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) return;
data()->AsMap()->SerializeForElementLoad(broker());
}
void MapRef::SerializeForElementStore() {
+ if (data()->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- if (data()->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) return;
data()->AsMap()->SerializeForElementStore(broker());
}
@@ -2843,10 +2884,10 @@ bool MapRef::HasOnlyStablePrototypesWithFastElements(
bool MapRef::supports_fast_array_iteration() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
return SupportsFastArrayIteration(broker()->isolate(), object());
}
return data()->AsMap()->supports_fast_array_iteration();
@@ -2854,10 +2895,10 @@ bool MapRef::supports_fast_array_iteration() const {
bool MapRef::supports_fast_array_resize() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
return SupportsFastArrayResize(broker()->isolate(), object());
}
return data()->AsMap()->supports_fast_array_resize();
@@ -2865,11 +2906,10 @@ bool MapRef::supports_fast_array_resize() const {
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
}
return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
@@ -2904,11 +2944,10 @@ OddballType MapRef::oddball_type() const {
FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return FeedbackCellRef(broker(), object()->GetClosureFeedbackCell(index));
}
@@ -2919,9 +2958,8 @@ FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->RawFastDoublePropertyAt(index);
}
JSObjectData* object_data = data()->AsJSObject();
@@ -2931,9 +2969,8 @@ double JSObjectRef::RawFastDoublePropertyAt(FieldIndex index) const {
uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->RawFastDoublePropertyAsBitsAt(index);
}
JSObjectData* object_data = data()->AsJSObject();
@@ -2943,13 +2980,12 @@ uint64_t JSObjectRef::RawFastDoublePropertyAsBitsAt(FieldIndex index) const {
ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ObjectRef(broker(), handle(object()->RawFastPropertyAt(index),
- broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->RawFastPropertyAt(index)));
}
JSObjectData* object_data = data()->AsJSObject();
CHECK(index.is_inobject());
@@ -2960,13 +2996,13 @@ ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
bool AllocationSiteRef::IsFastLiteral() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHeapAllocationIf allow_heap_allocation(
+ CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
+ AllowHeapAllocationIfNeeded allow_heap_allocation(
data()->kind(), broker()->mode()); // For TryMigrateInstance.
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return IsInlinableFastLiteral(
handle(object()->boilerplate(), broker()->isolate()));
}
@@ -2974,24 +3010,25 @@ bool AllocationSiteRef::IsFastLiteral() const {
}
void AllocationSiteRef::SerializeBoilerplate() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsAllocationSite()->SerializeBoilerplate(broker());
}
void JSObjectRef::SerializeElements() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeElements(broker());
}
void JSObjectRef::EnsureElementsTenured() {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
broker()->mode());
- AllowHeapAllocationIf allow_heap_allocation(data()->kind(),
- broker()->mode());
Handle<FixedArrayBase> object_elements = elements().object();
if (ObjectInYoungGeneration(*object_elements)) {
@@ -3010,8 +3047,8 @@ void JSObjectRef::EnsureElementsTenured() {
FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return FieldIndex::ForDescriptor(*object(), descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -3020,8 +3057,8 @@ FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
int MapRef::GetInObjectPropertyOffset(int i) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->GetInObjectPropertyOffset(i);
}
return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
@@ -3030,8 +3067,8 @@ int MapRef::GetInObjectPropertyOffset(int i) const {
PropertyDetails MapRef::GetPropertyDetails(
InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->instance_descriptors().GetDetails(descriptor_index);
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
@@ -3040,14 +3077,14 @@ PropertyDetails MapRef::GetPropertyDetails(
NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return NameRef(
broker(),
- handle(object()->instance_descriptors().GetKey(descriptor_index),
- broker()->isolate()));
+ broker()->CanonicalPersistentHandle(
+ object()->instance_descriptors().GetKey(descriptor_index)));
}
DescriptorArrayData* descriptors = data()->AsMap()->instance_descriptors();
return NameRef(broker(),
@@ -3066,10 +3103,10 @@ bool MapRef::IsPrimitiveMap() const {
MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Handle<Map> owner(
object()->FindFieldOwner(broker()->isolate(), descriptor_index),
broker()->isolate());
@@ -3083,10 +3120,10 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Handle<FieldType> field_type(
object()->instance_descriptors().GetFieldType(descriptor_index),
broker()->isolate());
@@ -3100,8 +3137,8 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->IsUnboxedDoubleField(
FieldIndex::ForDescriptor(*object(), descriptor_index));
}
@@ -3113,8 +3150,8 @@ bool MapRef::IsUnboxedDoubleField(InternalIndex descriptor_index) const {
uint16_t StringRef::GetFirstChar() {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->Get(0);
}
return data()->AsString()->first_char();
@@ -3122,12 +3159,12 @@ uint16_t StringRef::GetFirstChar() {
base::Optional<double> StringRef::ToNumber() {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHeapAllocationIfNeeded allow_heap_allocation(data()->kind(),
broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHeapAllocationIf allow_heap_allocation(data()->kind(),
- broker()->mode());
int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
return StringToDouble(broker()->isolate(), object(), flags);
}
@@ -3136,50 +3173,33 @@ base::Optional<double> StringRef::ToNumber() {
int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->constant_elements().length();
}
return data()->AsArrayBoilerplateDescription()->constants_elements_length();
}
-int ObjectBoilerplateDescriptionRef::size() const {
- if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return object()->size();
- }
- return data()->AsObjectBoilerplateDescription()->size();
-}
-
ObjectRef FixedArrayRef::get(int i) const {
if (data_->should_access_heap()) {
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ObjectRef(broker(), handle(object()->get(i), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ObjectRef(broker(),
+ broker()->CanonicalPersistentHandle(object()->get(i)));
}
return ObjectRef(broker(), data()->AsFixedArray()->Get(i));
}
-bool FixedDoubleArrayRef::is_the_hole(int i) const {
- if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return object()->is_the_hole(i);
- }
- return data()->AsFixedDoubleArray()->Get(i).is_hole_nan();
-}
-
-double FixedDoubleArrayRef::get_scalar(int i) const {
+Float64 FixedDoubleArrayRef::get(int i) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return object()->get_scalar(i);
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return Float64::FromBits(object()->get_representation(i));
+ } else {
+ return data()->AsFixedDoubleArray()->Get(i);
}
- CHECK(!data()->AsFixedDoubleArray()->Get(i).is_hole_nan());
- return data()->AsFixedDoubleArray()->Get(i).get_scalar();
}
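// A minimal caller sketch (illustrative only, not part of the patch; the
// variable names are hypothetical): with the unified FixedDoubleArrayRef::get()
// above, callers detect holes via Float64::is_hole_nan() and read values via
// get_scalar(), replacing the removed is_the_hole()/get_scalar() pair:
//
//   Float64 element = fixed_double_array_ref.get(i);
//   if (!element.is_hole_nan()) {
//     double value = element.get_scalar();  // not the hole NaN, safe to use
//     // ... use value ...
//   }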
uint8_t BytecodeArrayRef::get(int index) const { return object()->get(index); }
@@ -3190,12 +3210,12 @@ Address BytecodeArrayRef::GetFirstBytecodeAddress() const {
Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return handle(object()->constant_pool().get(index), broker()->isolate());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return broker()->CanonicalPersistentHandle(
+ object()->constant_pool().get(index));
}
return data()->AsBytecodeArray()->GetConstantAtIndex(index,
broker()->isolate());
@@ -3203,11 +3223,10 @@ Handle<Object> BytecodeArrayRef::GetConstantAtIndex(int index) const {
bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->constant_pool().get(index).IsSmi();
}
return data()->AsBytecodeArray()->IsConstantAtIndexSmi(index);
@@ -3215,25 +3234,22 @@ bool BytecodeArrayRef::IsConstantAtIndexSmi(int index) const {
Smi BytecodeArrayRef::GetConstantAtIndexAsSmi(int index) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return Smi::cast(object()->constant_pool().get(index));
}
return data()->AsBytecodeArray()->GetConstantAtIndexAsSmi(index);
}
void BytecodeArrayRef::SerializeForCompilation() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
data()->AsBytecodeArray()->SerializeForCompilation(broker());
}
-Handle<ByteArray> BytecodeArrayRef::source_positions() const {
- return broker()->CanonicalPersistentHandle(
- object()->SourcePositionTableIfCollected());
+Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
+ return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
}
Address BytecodeArrayRef::handler_table_address() const {
@@ -3245,86 +3261,87 @@ int BytecodeArrayRef::handler_table_size() const {
return object()->handler_table().length();
}
-Handle<Object> JSHeapBroker::GetRootHandle(Object object) {
- RootIndex root_index;
- CHECK(root_index_map().Lookup(object.ptr(), &root_index));
- return Handle<Object>(isolate()->root_handle(root_index).location());
-}
-
-// Accessors for direct heap reads.
-#define DIRECT_HEAP_ACCESSOR_C(holder, result, name) \
- result holder##Ref::name() const { return object()->name(); }
+#define IF_ACCESS_FROM_HEAP_C(name) \
+ if (data_->should_access_heap()) { \
+ AllowHandleAllocationIfNeeded handle_allocation(data_->kind(), \
+ broker()->mode()); \
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data_->kind(), \
+ broker()->mode()); \
+ return object()->name(); \
+ }
-#define IF_ACCESS_FROM_HEAP_C(holder, name) \
- if (data_->should_access_heap()) { \
- CHECK(broker()->mode() == JSHeapBroker::kDisabled || \
- ReadOnlyHeap::Contains(HeapObject::cast(*object()))); \
- AllowHandleAllocationIf handle_allocation(data_->kind(), \
- broker()->mode()); \
- AllowHandleDereferenceIf allow_handle_dereference(data_->kind(), \
- broker()->mode()); \
- return object()->name(); \
+#define IF_ACCESS_FROM_HEAP(result, name) \
+ if (data_->should_access_heap()) { \
+ AllowHandleAllocationIfNeeded handle_allocation(data_->kind(), \
+ broker()->mode()); \
+ AllowHandleDereferenceIfNeeded handle_dereference(data_->kind(), \
+ broker()->mode()); \
+ return result##Ref(broker(), \
+ broker()->CanonicalPersistentHandle(object()->name())); \
}
-#define IF_ACCESS_FROM_HEAP(holder, result, name) \
- if (data_->kind() == ObjectDataKind::kUnserializedHeapObject) { \
- AllowHandleAllocationIf handle_allocation(data_->kind(), \
- broker()->mode()); \
- AllowHandleDereferenceIf handle_dereference(data_->kind(), \
- broker()->mode()); \
- return result##Ref(broker(), \
- handle(object()->name(), broker()->isolate())); \
- } else if (data_->kind() == \
- ObjectDataKind::kUnserializedReadOnlyHeapObject) { \
- AllowHandleDereferenceIf handle_dereference(data_->kind(), \
- broker()->mode()); \
- return result##Ref(broker(), broker()->GetRootHandle(object()->name())); \
- }
-
-// Macros for definining a const getter that, depending on the broker mode,
-// either looks into the handle or into the serialized data.
-#define BIMODAL_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP(holder, result, name); \
- ObjectData* data = ObjectRef::data()->As##holder()->name(); \
- if (data->kind() == ObjectDataKind::kUnserializedHeapObject) { \
- return result##Ref(broker(), data->object()); \
- } else { \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- } \
+// Macros for defining a const getter that, depending on the data kind,
+// either looks into the heap or into the serialized data.
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP(result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
}
// Like above except that the result type is not an XYZRef.
#define BIMODAL_ACCESSOR_C(holder, result, name) \
result holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_C(holder, name); \
+ IF_ACCESS_FROM_HEAP_C(name); \
return ObjectRef::data()->As##holder()->name(); \
}
// Like above but for BitFields.
#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
typename BitField::FieldType holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_C(holder, name); \
+ IF_ACCESS_FROM_HEAP_C(name); \
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
+// Like IF_ACCESS_FROM_HEAP_C, but direct heap access is also allowed for
+// kSerialized data, though only for methods we have identified as safe.
+#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
+ AllowHandleAllocationIfNeeded handle_allocation( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ AllowHandleDereferenceIfNeeded allow_handle_dereference( \
+ data_->kind(), broker()->mode(), FLAG_turbo_direct_heap_access); \
+ return object()->name(); \
+ }
+
+// Like BIMODAL_ACCESSOR_C except that we force a direct heap access if
+// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is
+// because we have identified the method as safe for direct heap access,
+// while the holder##Data class still needs to be serialized.
+#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
+ return ObjectRef::data()->As##holder()->name(); \
+ }
+
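// A sketch of what these macros generate (illustrative only, not part of the
// patch): given the definitions above, an instantiation such as
// BIMODAL_ACCESSOR(AllocationSite, Object, nested_site) expands roughly to
// the following getter, whose heap branch comes from IF_ACCESS_FROM_HEAP:
//
//   ObjectRef AllocationSiteRef::nested_site() const {
//     if (data_->should_access_heap()) {
//       AllowHandleAllocationIfNeeded handle_allocation(data_->kind(),
//                                                       broker()->mode());
//       AllowHandleDereferenceIfNeeded handle_dereference(data_->kind(),
//                                                         broker()->mode());
//       return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
//                                      object()->nested_site()));
//     }
//     return ObjectRef(broker(),
//                      ObjectRef::data()->AsAllocationSite()->nested_site());
//   }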
BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
-DIRECT_HEAP_ACCESSOR_C(BytecodeArray, int, register_count)
-DIRECT_HEAP_ACCESSOR_C(BytecodeArray, int, parameter_count)
-DIRECT_HEAP_ACCESSOR_C(BytecodeArray, interpreter::Register,
- incoming_new_target_or_generator_register)
+BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
-BIMODAL_ACCESSOR(Cell, Object, value)
+BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, int, parameter_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
+ incoming_new_target_or_generator_register)
BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
BIMODAL_ACCESSOR(HeapObject, Map, map)
+BIMODAL_ACCESSOR_C(HeapNumber, double, value)
+
BIMODAL_ACCESSOR(JSArray, Object, length)
BIMODAL_ACCESSOR(JSBoundFunction, JSReceiver, bound_target_function)
@@ -3332,7 +3349,6 @@ BIMODAL_ACCESSOR(JSBoundFunction, Object, bound_this)
BIMODAL_ACCESSOR(JSBoundFunction, FixedArray, bound_arguments)
BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
-BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_offset)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
@@ -3388,28 +3404,27 @@ BIMODAL_ACCESSOR_C(Code, unsigned, inlined_bytecode_size)
BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
#undef DEF_NATIVE_CONTEXT_ACCESSOR
+BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
+
BIMODAL_ACCESSOR(PropertyCell, Object, value)
BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
return CallHandlerInfoRef(
- broker(), handle(object()->call_code(), broker()->isolate()));
+ broker(), broker()->CanonicalPersistentHandle(object()->call_code()));
}
- CallHandlerInfoData* call_code =
- data()->AsFunctionTemplateInfo()->call_code();
+ ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
if (!call_code) return base::nullopt;
return CallHandlerInfoRef(broker(), call_code);
}
bool FunctionTemplateInfoRef::is_signature_undefined() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
return object()->signature().IsUndefined(broker()->isolate());
}
@@ -3418,11 +3433,10 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
bool FunctionTemplateInfoRef::has_call_code() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
CallOptimization call_optimization(broker()->isolate(), object());
return call_optimization.is_simple_api_call();
@@ -3437,11 +3451,10 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
const HolderLookupResult not_found;
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
CallOptimization call_optimization(broker()->isolate(), object());
Handle<Map> receiver_map_ref(receiver_map.object());
@@ -3468,7 +3481,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
KnownReceiversMap::iterator lookup_it =
- fti_data->known_receivers().find(receiver_map.data()->AsMap());
+ fti_data->known_receivers().find(receiver_map.data());
if (lookup_it != fti_data->known_receivers().cend()) {
return lookup_it->second;
}
@@ -3479,8 +3492,7 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
}
if (!receiver_map.IsJSReceiverMap() ||
(receiver_map.is_access_check_needed() && !accept_any_receiver())) {
- fti_data->known_receivers().insert(
- {receiver_map.data()->AsMap(), not_found});
+ fti_data->known_receivers().insert({receiver_map.data(), not_found});
return not_found;
}
@@ -3492,14 +3504,12 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
switch (result.lookup) {
case CallOptimization::kHolderFound: {
result.holder = JSObjectRef(broker(), holder);
- fti_data->known_receivers().insert(
- {receiver_map.data()->AsMap(), result});
+ fti_data->known_receivers().insert({receiver_map.data(), result});
break;
}
default: {
DCHECK_EQ(result.holder, base::nullopt);
- fti_data->known_receivers().insert(
- {receiver_map.data()->AsMap(), result});
+ fti_data->known_receivers().insert({receiver_map.data(), result});
}
}
return result;
@@ -3521,13 +3531,13 @@ BIMODAL_ACCESSOR(FeedbackCell, HeapObject, value)
base::Optional<ObjectRef> MapRef::GetStrongValue(
InternalIndex descriptor_index) const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
MaybeObject value =
object()->instance_descriptors().GetValue(descriptor_index);
HeapObject object;
if (value.GetHeapObjectIfStrong(&object)) {
- return ObjectRef(broker(), handle(object, broker()->isolate()));
+      return ObjectRef(broker(), broker()->CanonicalPersistentHandle(object));
}
return base::nullopt;
}
@@ -3545,19 +3555,14 @@ void MapRef::SerializeRootMap() {
}
base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->kind() == ObjectDataKind::kUnserializedHeapObject) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return MapRef(broker(), handle(object()->FindRootMap(broker()->isolate()),
- broker()->isolate()));
- } else if (data_->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return MapRef(broker(), broker()->GetRootHandle(
+ if (data_->should_access_heap()) {
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return MapRef(broker(), broker()->CanonicalPersistentHandle(
object()->FindRootMap(broker()->isolate())));
}
- MapData* map_data = data()->AsMap()->FindRootMap();
- if (map_data) {
+ ObjectData* map_data = data()->AsMap()->FindRootMap();
+ if (map_data != nullptr) {
return MapRef(broker(), map_data);
}
TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
@@ -3566,58 +3571,58 @@ base::Optional<MapRef> MapRef::FindRootMap() const {
void* JSTypedArrayRef::data_ptr() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->DataPtr();
}
return data()->AsJSTypedArray()->data_ptr();
}
bool MapRef::IsInobjectSlackTrackingInProgress() const {
- IF_ACCESS_FROM_HEAP_C(Map, IsInobjectSlackTrackingInProgress);
+ IF_ACCESS_FROM_HEAP_C(IsInobjectSlackTrackingInProgress);
return Map::Bits3::ConstructionCounterBits::decode(
data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
}
int MapRef::constructor_function_index() const {
- IF_ACCESS_FROM_HEAP_C(Map, GetConstructorFunctionIndex);
+ IF_ACCESS_FROM_HEAP_C(GetConstructorFunctionIndex);
CHECK(IsPrimitiveMap());
return data()->AsMap()->constructor_function_index();
}
bool MapRef::is_stable() const {
- IF_ACCESS_FROM_HEAP_C(Map, is_stable);
+ IF_ACCESS_FROM_HEAP_C(is_stable);
return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
}
bool MapRef::CanBeDeprecated() const {
- IF_ACCESS_FROM_HEAP_C(Map, CanBeDeprecated);
+ IF_ACCESS_FROM_HEAP_C(CanBeDeprecated);
CHECK_GT(NumberOfOwnDescriptors(), 0);
return data()->AsMap()->can_be_deprecated();
}
bool MapRef::CanTransition() const {
- IF_ACCESS_FROM_HEAP_C(Map, CanTransition);
+ IF_ACCESS_FROM_HEAP_C(CanTransition);
return data()->AsMap()->can_transition();
}
int MapRef::GetInObjectPropertiesStartInWords() const {
- IF_ACCESS_FROM_HEAP_C(Map, GetInObjectPropertiesStartInWords);
+ IF_ACCESS_FROM_HEAP_C(GetInObjectPropertiesStartInWords);
return data()->AsMap()->in_object_properties_start_in_words();
}
int MapRef::GetInObjectProperties() const {
- IF_ACCESS_FROM_HEAP_C(Map, GetInObjectProperties);
+ IF_ACCESS_FROM_HEAP_C(GetInObjectProperties);
return data()->AsMap()->in_object_properties();
}
int ScopeInfoRef::ContextLength() const {
- IF_ACCESS_FROM_HEAP_C(ScopeInfo, ContextLength);
+ IF_ACCESS_FROM_HEAP_C(ContextLength);
return data()->AsScopeInfo()->context_length();
}
int ScopeInfoRef::Flags() const {
- IF_ACCESS_FROM_HEAP_C(ScopeInfo, Flags);
+ IF_ACCESS_FROM_HEAP_C(Flags);
return data()->AsScopeInfo()->flags();
}
@@ -3626,30 +3631,30 @@ bool ScopeInfoRef::HasContextExtension() const {
}
bool ScopeInfoRef::HasOuterScopeInfo() const {
- IF_ACCESS_FROM_HEAP_C(ScopeInfo, HasOuterScopeInfo);
+ IF_ACCESS_FROM_HEAP_C(HasOuterScopeInfo);
return data()->AsScopeInfo()->has_outer_scope_info();
}
ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ScopeInfoRef(
- broker(), handle(object()->OuterScopeInfo(), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ScopeInfoRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->OuterScopeInfo()));
}
return ScopeInfoRef(broker(), data()->AsScopeInfo()->outer_scope_info());
}
void ScopeInfoRef::SerializeScopeInfoChain() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsScopeInfo()->SerializeScopeInfoChain(broker());
}
bool StringRef::IsExternalString() const {
- IF_ACCESS_FROM_HEAP_C(String, IsExternalString);
+ IF_ACCESS_FROM_HEAP_C(IsExternalString);
return data()->AsString()->is_external_string();
}
@@ -3661,47 +3666,45 @@ Address CallHandlerInfoRef::callback() const {
}
Address FunctionTemplateInfoRef::c_function() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
+ if (data_->should_access_heap()) {
return v8::ToCData<Address>(object()->GetCFunction());
}
return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_function();
}
const CFunctionInfo* FunctionTemplateInfoRef::c_signature() const {
- if (broker()->mode() == JSHeapBroker::kDisabled) {
+ if (data_->should_access_heap()) {
return v8::ToCData<CFunctionInfo*>(object()->GetCSignature());
}
return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_signature();
}
bool StringRef::IsSeqString() const {
- IF_ACCESS_FROM_HEAP_C(String, IsSeqString);
+ IF_ACCESS_FROM_HEAP_C(IsSeqString);
return data()->AsString()->is_seq_string();
}
ScopeInfoRef NativeContextRef::scope_info() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ScopeInfoRef(broker(),
- handle(object()->scope_info(), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ScopeInfoRef(
+ broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
}
return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return SharedFunctionInfoRef(
broker(),
- handle(object()->shared_function_info(), broker()->isolate()));
+ broker()->CanonicalPersistentHandle(object()->shared_function_info()));
}
return SharedFunctionInfoRef(
@@ -3771,8 +3774,8 @@ bool ObjectRef::IsTheHole() const {
bool ObjectRef::BooleanValue() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
return object()->BooleanValue(broker()->isolate());
}
return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
@@ -3805,18 +3808,22 @@ Maybe<double> ObjectRef::OddballToNumber() const {
base::Optional<ObjectRef> ObjectRef::GetOwnConstantElement(
uint32_t index, SerializationPolicy policy) const {
- if (data_->kind() == ObjectDataKind::kUnserializedHeapObject) {
- return (IsJSObject() || IsString())
- ? GetOwnElementFromHeap(broker(), object(), index, true)
- : base::nullopt;
- } else if (data_->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- DCHECK(!IsJSObject());
- // TODO(mythria): For ReadOnly strings, currently we cannot access data from
- // heap without creating handles since we use LookupIterator. We should have
- // a custom implementation for read only strings that doesn't create
- // handles. Till then it is OK to disable this optimization since this only
- // impacts keyed accesses on read only strings.
- return base::nullopt;
+ if (!(IsJSObject() || IsString())) return base::nullopt;
+ if (data_->should_access_heap()) {
+ // TODO(neis): Once the CHECK_NE below is eliminated, i.e. once we can
+ // safely read from the background thread, the special branch for read-only
+ // objects can be removed as well.
+ if (data_->kind() == ObjectDataKind::kUnserializedReadOnlyHeapObject) {
+ DCHECK(IsString());
+ // TODO(mythria): For ReadOnly strings, currently we cannot access data
+ // from heap without creating handles since we use LookupIterator. We
+ // should have a custom implementation for read only strings that doesn't
+ // create handles. Till then it is OK to disable this optimization since
+ // this only impacts keyed accesses on read only strings.
+ return base::nullopt;
+ }
+ CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
+ return GetOwnElementFromHeap(broker(), object(), index, true);
}
ObjectData* element = nullptr;
if (IsJSObject()) {
@@ -3833,7 +3840,6 @@ base::Optional<ObjectRef> JSObjectRef::GetOwnDataProperty(
Representation field_representation, FieldIndex index,
SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
return GetOwnDataPropertyFromHeap(broker(),
Handle<JSObject>::cast(object()),
field_representation, index);
@@ -3865,40 +3871,29 @@ base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
return ObjectRef(broker(), element);
}
-double HeapNumberRef::value() const {
- IF_ACCESS_FROM_HEAP_C(HeapNumber, value);
- return data()->AsHeapNumber()->value();
-}
-
-uint64_t BigIntRef::AsUint64() const {
- IF_ACCESS_FROM_HEAP_C(BigInt, AsUint64);
- return data()->AsBigInt()->AsUint64();
-}
-
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
- if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return CellRef(broker(),
- handle(object()->GetCell(cell_index), broker()->isolate()));
- }
- CellData* cell = data()->AsSourceTextModule()->GetCell(broker(), cell_index);
+ if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
+ AllowHandleAllocationIfNeeded allow_handle_allocation(
+ data()->kind(), broker()->mode(), FLAG_turbo_direct_heap_access);
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(
+ data()->kind(), broker()->mode(), FLAG_turbo_direct_heap_access);
+ return CellRef(broker(), broker()->CanonicalPersistentHandle(
+ object()->GetCell(cell_index)));
+ }
+ ObjectData* cell =
+ data()->AsSourceTextModule()->GetCell(broker(), cell_index);
if (cell == nullptr) return base::nullopt;
return CellRef(broker(), cell);
}
ObjectRef SourceTextModuleRef::import_meta() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ObjectRef(broker(),
- handle(object()->import_meta(), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ObjectRef(
+ broker(), broker()->CanonicalPersistentHandle(object()->import_meta()));
}
return ObjectRef(broker(),
data()->AsSourceTextModule()->GetImportMeta(broker()));
@@ -3919,7 +3914,7 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
RefsMap::Entry* entry = broker->refs_->LookupOrInsert(object.address());
ObjectData** storage = &(entry->value);
if (*storage == nullptr) {
- AllowHandleDereferenceIf allow_handle_dereference(
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(
kUnserializedHeapObject, broker->mode());
entry->value = broker->zone()->New<ObjectData>(
broker, storage, object,
@@ -3932,8 +3927,8 @@ ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
UNREACHABLE();
}
if (!data_) { // TODO(mslekova): Remove once we're on the background thread.
- AllowHandleDereferenceIf allow_handle_dereference(data_->kind(),
- broker->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data_->kind(),
+ broker->mode());
object->Print();
}
CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker");
@@ -3969,8 +3964,8 @@ OddballType GetOddballType(Isolate* isolate, Map map) {
HeapObjectType HeapObjectRef::GetHeapObjectType() const {
if (data_->should_access_heap()) {
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Map map = Handle<HeapObject>::cast(object())->map();
HeapObjectType::Flags flags(0);
if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
@@ -3985,15 +3980,14 @@ HeapObjectType HeapObjectRef::GetHeapObjectType() const {
}
base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return JSObjectRef(broker(),
- handle(object()->boilerplate(), broker()->isolate()));
- }
- JSObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return JSObjectRef(
+ broker(), broker()->CanonicalPersistentHandle(object()->boilerplate()));
+ }
+ ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
if (boilerplate) {
return JSObjectRef(broker(), boilerplate);
} else {
@@ -4007,24 +4001,18 @@ ElementsKind JSObjectRef::GetElementsKind() const {
FixedArrayBaseRef JSObjectRef::elements() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return FixedArrayBaseRef(broker(),
- handle(object()->elements(), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return FixedArrayBaseRef(
+ broker(), broker()->CanonicalPersistentHandle(object()->elements()));
}
- ObjectData* elements_data = data()->AsJSObject()->elements();
- if (elements_data->kind() ==
- ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- return FixedArrayBaseRef(broker(), elements_data->object());
- }
- return FixedArrayBaseRef(broker(), elements_data->AsFixedArrayBase());
+ return FixedArrayBaseRef(broker(), data()->AsJSObject()->elements());
}
int FixedArrayBaseRef::length() const {
- IF_ACCESS_FROM_HEAP_C(FixedArrayBase, length);
+ IF_ACCESS_FROM_HEAP_C(length);
return data()->AsFixedArrayBase()->length();
}
@@ -4051,11 +4039,12 @@ base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
}
void FeedbackVectorRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsFeedbackVector()->Serialize(broker());
}
bool FeedbackVectorRef::serialized() const {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return true;
return data()->AsFeedbackVector()->serialized();
}
@@ -4066,32 +4055,33 @@ bool NameRef::IsUniqueName() const {
}
ObjectRef JSRegExpRef::data() const {
- IF_ACCESS_FROM_HEAP(JSRegExp, Object, data);
+ IF_ACCESS_FROM_HEAP(Object, data);
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->data());
}
ObjectRef JSRegExpRef::flags() const {
- IF_ACCESS_FROM_HEAP(JSRegExp, Object, flags);
+ IF_ACCESS_FROM_HEAP(Object, flags);
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->flags());
}
ObjectRef JSRegExpRef::last_index() const {
- IF_ACCESS_FROM_HEAP(JSRegExp, Object, last_index);
+ IF_ACCESS_FROM_HEAP(Object, last_index);
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->last_index());
}
ObjectRef JSRegExpRef::raw_properties_or_hash() const {
- IF_ACCESS_FROM_HEAP(JSRegExp, Object, raw_properties_or_hash);
+ IF_ACCESS_FROM_HEAP(Object, raw_properties_or_hash);
return ObjectRef(broker(),
ObjectRef::data()->AsJSRegExp()->raw_properties_or_hash());
}
ObjectRef JSRegExpRef::source() const {
- IF_ACCESS_FROM_HEAP(JSRegExp, Object, source);
+ IF_ACCESS_FROM_HEAP(Object, source);
return ObjectRef(broker(), ObjectRef::data()->AsJSRegExp()->source());
}
void JSRegExpRef::SerializeAsRegExpBoilerplate() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
JSObjectRef::data()->AsJSRegExp()->SerializeAsRegExpBoilerplate(broker());
}
@@ -4122,7 +4112,8 @@ Handle<Object> ObjectRef::object() const {
}
#endif // DEBUG
-HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
+HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
#undef DEF_OBJECT_GETTER
JSHeapBroker* ObjectRef::broker() const { return broker_; }
@@ -4163,13 +4154,15 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
TraceScope tracer(broker, this, "NativeContextData::Serialize");
Handle<NativeContext> context = Handle<NativeContext>::cast(object());
-#define SERIALIZE_MEMBER(type, name) \
- DCHECK_NULL(name##_); \
- name##_ = broker->GetOrCreateData(context->name())->As##type(); \
- if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
- if (name##_->IsMap() && \
- !InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
- name##_->AsMap()->SerializeConstructor(broker); \
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name()); \
+ if (!name##_->should_access_heap()) { \
+ if (name##_->IsJSFunction()) name##_->AsJSFunction()->Serialize(broker); \
+ if (name##_->IsMap() && \
+ !InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
+ name##_->AsMap()->SerializeConstructor(broker); \
+ } \
}
BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
if (!broker->isolate()->bootstrapper()->IsActive()) {
@@ -4177,35 +4170,37 @@ void NativeContextData::Serialize(JSHeapBroker* broker) {
}
#undef SERIALIZE_MEMBER
- bound_function_with_constructor_map_->SerializePrototype(broker);
- bound_function_without_constructor_map_->SerializePrototype(broker);
+ if (!bound_function_with_constructor_map_->should_access_heap()) {
+ bound_function_with_constructor_map_->AsMap()->SerializePrototype(broker);
+ }
+ if (!bound_function_without_constructor_map_->should_access_heap()) {
+ bound_function_without_constructor_map_->AsMap()->SerializePrototype(
+ broker);
+ }
DCHECK(function_maps_.empty());
int const first = Context::FIRST_FUNCTION_MAP_INDEX;
int const last = Context::LAST_FUNCTION_MAP_INDEX;
function_maps_.reserve(last + 1 - first);
for (int i = first; i <= last; ++i) {
- function_maps_.push_back(broker->GetOrCreateData(context->get(i))->AsMap());
+ function_maps_.push_back(broker->GetOrCreateData(context->get(i)));
}
- scope_info_ = broker->GetOrCreateData(context->scope_info())->AsScopeInfo();
+ scope_info_ = broker->GetOrCreateData(context->scope_info());
}
void JSFunctionRef::Serialize() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSFunction()->Serialize(broker());
}
bool JSBoundFunctionRef::serialized() const {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return true;
return data()->AsJSBoundFunction()->serialized();
}
bool JSFunctionRef::serialized() const {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return true;
return data()->AsJSFunction()->serialized();
}
@@ -4225,11 +4220,10 @@ JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
}
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
isolate(), broker()->target_native_context().object(),
@@ -4237,28 +4231,30 @@ JSArrayRef SharedFunctionInfoRef::GetTemplateObject(
return JSArrayRef(broker(), template_object);
}
- JSArrayData* array =
+ ObjectData* array =
data()->AsSharedFunctionInfo()->GetTemplateObject(source.slot);
if (array != nullptr) return JSArrayRef(broker(), array);
CHECK_EQ(policy, SerializationPolicy::kSerializeIfNeeded);
- CHECK(broker()->SerializingAllowed());
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
Handle<JSArray> template_object =
TemplateObjectDescription::GetTemplateObject(
broker()->isolate(), broker()->target_native_context().object(),
description.object(), object(), source.slot.ToInt());
- array = broker()->GetOrCreateData(template_object)->AsJSArray();
+ array = broker()->GetOrCreateData(template_object);
data()->AsSharedFunctionInfo()->SetTemplateObject(source.slot, array);
return JSArrayRef(broker(), array);
}
void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
}
void SharedFunctionInfoRef::SerializeScopeInfoChain() {
+ if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsSharedFunctionInfo()->SerializeScopeInfoChain(broker());
}
@@ -4266,39 +4262,37 @@ void SharedFunctionInfoRef::SerializeScopeInfoChain() {
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (object()->IsApiFunction()) {
return FunctionTemplateInfoRef(
- broker(), handle(object()->function_data(), broker()->isolate()));
+ broker(),
+ broker()->CanonicalPersistentHandle(object()->function_data()));
}
return base::nullopt;
}
- FunctionTemplateInfoData* function_template_info =
+ ObjectData* function_template_info =
data()->AsSharedFunctionInfo()->function_template_info();
if (!function_template_info) return base::nullopt;
return FunctionTemplateInfoRef(broker(), function_template_info);
}
int SharedFunctionInfoRef::context_header_size() const {
- IF_ACCESS_FROM_HEAP_C(SharedFunctionInfo, scope_info().ContextHeaderLength);
+ IF_ACCESS_FROM_HEAP_C(scope_info().ContextHeaderLength);
return data()->AsSharedFunctionInfo()->context_header_size();
}
ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
- AllowHandleAllocationIf allow_handle_allocation(data()->kind(),
- broker()->mode());
- AllowHandleDereferenceIf allow_handle_dereference(data()->kind(),
- broker()->mode());
- return ScopeInfoRef(broker(),
- handle(object()->scope_info(), broker()->isolate()));
+ AllowHandleAllocationIfNeeded allow_handle_allocation(data()->kind(),
+ broker()->mode());
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(data()->kind(),
+ broker()->mode());
+ return ScopeInfoRef(
+ broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
}
return ScopeInfoRef(broker(), data()->AsSharedFunctionInfo()->scope_info());
}
void JSObjectRef::SerializeObjectCreateMap() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSObject()->SerializeObjectCreateMap(broker());
@@ -4345,14 +4339,12 @@ bool MapRef::serialized_prototype() const {
}
void SourceTextModuleRef::Serialize() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsSourceTextModule()->Serialize(broker());
}
void NativeContextRef::Serialize() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsNativeContext()->Serialize(broker());
@@ -4370,21 +4362,18 @@ bool JSTypedArrayRef::serialized() const {
}
void JSBoundFunctionRef::Serialize() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsJSBoundFunction()->Serialize(broker());
}
void PropertyCellRef::Serialize() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsPropertyCell()->Serialize(broker());
}
void FunctionTemplateInfoRef::SerializeCallCode() {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
if (data_->should_access_heap()) return;
CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
@@ -4393,12 +4382,10 @@ void FunctionTemplateInfoRef::SerializeCallCode() {
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
NameRef const& name, SerializationPolicy policy) const {
if (data_->should_access_heap()) {
- DCHECK(data_->kind() != ObjectDataKind::kUnserializedReadOnlyHeapObject);
return GetPropertyCellFromHeap(broker(), name.object());
}
- PropertyCellData* property_cell_data =
- data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data()->AsName(), policy);
+ ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
+ broker(), name.data(), policy);
if (property_cell_data == nullptr) return base::nullopt;
return PropertyCellRef(broker(), property_cell_data);
}
@@ -4479,7 +4466,6 @@ GlobalAccessFeedback::GlobalAccessFeedback(PropertyCellRef cell,
GlobalAccessFeedback::GlobalAccessFeedback(FeedbackSlotKind slot_kind)
: ProcessedFeedback(kGlobalAccess, slot_kind),
- cell_or_context_(base::nullopt),
index_and_immutable_(0 /* doesn't matter */) {
DCHECK(IsGlobalICKind(slot_kind));
}
@@ -4611,12 +4597,12 @@ bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
}
MinimorphicLoadPropertyAccessFeedback::MinimorphicLoadPropertyAccessFeedback(
- NameRef const& name, FeedbackSlotKind slot_kind, bool is_monomorphic,
- Handle<Object> handler, bool has_migration_target_maps)
+ NameRef const& name, FeedbackSlotKind slot_kind, Handle<Object> handler,
+ MaybeHandle<Map> maybe_map, bool has_migration_target_maps)
: ProcessedFeedback(kMinimorphicPropertyAccess, slot_kind),
name_(name),
- is_monomorphic_(is_monomorphic),
handler_(handler),
+ maybe_map_(maybe_map),
has_migration_target_maps_(has_migration_target_maps) {
DCHECK(IsLoadICKind(slot_kind));
}
@@ -4690,11 +4676,22 @@ void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
}
MaybeObjectHandle TryGetMinimorphicHandler(
- std::vector<MapAndHandler> const& maps_and_handlers,
- FeedbackSlotKind kind) {
+ std::vector<MapAndHandler> const& maps_and_handlers, FeedbackSlotKind kind,
+ Handle<NativeContext> native_context) {
if (!FLAG_dynamic_map_checks || !IsLoadICKind(kind))
return MaybeObjectHandle();
+  // Don't use dynamic map checks when loading properties from Array.prototype.
+  // Dynamic map checks prevent constant folding and hence prevent inlining of
+  // the array builtins. Only the monomorphic case matters here; for
+  // polymorphic loads we currently don't inline the builtins even without
+  // dynamic map checks.
+ if (maps_and_handlers.size() == 1 &&
+ *maps_and_handlers[0].first ==
+ native_context->initial_array_prototype().map()) {
+ return MaybeObjectHandle();
+ }
+
MaybeObjectHandle initial_handler;
for (MapAndHandler map_and_handler : maps_and_handlers) {
auto map = map_and_handler.first;
@@ -4752,11 +4749,16 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForPropertyAccess(
base::Optional<NameRef> name =
static_name.has_value() ? static_name : GetNameFeedback(nexus);
- MaybeObjectHandle handler = TryGetMinimorphicHandler(maps_and_handlers, kind);
+ MaybeObjectHandle handler = TryGetMinimorphicHandler(
+ maps_and_handlers, kind, target_native_context().object());
if (!handler.is_null()) {
+ MaybeHandle<Map> maybe_map;
+ if (nexus.ic_state() == MONOMORPHIC) {
+ DCHECK_EQ(maps.size(), 1);
+ maybe_map = maps[0];
+ }
return *zone()->New<MinimorphicLoadPropertyAccessFeedback>(
- *name, kind, nexus.ic_state() == MONOMORPHIC, handler.object(),
- HasMigrationTargets(maps));
+ *name, kind, handler.object(), maybe_map, HasMigrationTargets(maps));
}
FilterRelevantReceiverMaps(isolate(), &maps);
@@ -5169,15 +5171,13 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
}
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
- if (ref.data_->kind() == ObjectDataKind::kUnserializedHeapObject ||
- !FLAG_concurrent_recompilation) {
+ if (!FLAG_concurrent_recompilation) {
// We cannot be in a background thread so it's safe to read the heap.
AllowHandleDereference allow_handle_dereference;
return os << ref.data() << " {" << ref.object() << "}";
- } else if (ref.data_->kind() ==
- ObjectDataKind::kUnserializedReadOnlyHeapObject) {
- AllowHandleDereferenceIf allow_handle_dereference(ref.data()->kind(),
- ref.broker()->mode());
+ } else if (ref.data_->should_access_heap()) {
+ AllowHandleDereferenceIfNeeded allow_handle_dereference(
+ ref.data()->kind(), ref.broker()->mode());
return os << ref.data() << " {" << ref.object() << "}";
} else {
return os << ref.data();
@@ -5210,7 +5210,7 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
map.object(), name.object(), access_mode);
if (is_concurrent_inlining_) {
- CHECK(SerializingAllowed());
+ CHECK_EQ(mode(), kSerializing);
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
@@ -5222,12 +5222,13 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessFeedback const& feedback,
FeedbackSource const& source, SerializationPolicy policy) {
- auto it = minimorphic_property_access_infos_.find(source.index());
+ auto it = minimorphic_property_access_infos_.find(source);
if (it != minimorphic_property_access_infos_.end()) return it->second;
if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_BROKER_MISSING(
- this, "MinimorphicLoadPropertyAccessInfo for slot " << source.index());
+ TRACE_BROKER_MISSING(this, "MinimorphicLoadPropertyAccessInfo for slot "
+ << source.index() << " "
+ << ObjectRef(this, source.vector));
return MinimorphicLoadPropertyAccessInfo::Invalid();
}
@@ -5235,9 +5236,10 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
- TRACE(this,
- "Storing MinimorphicLoadPropertyAccessInfo for " << source.index());
- minimorphic_property_access_infos_.insert({source.index(), access_info});
+ TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
+ << source.index() << " "
+ << ObjectRef(this, source.vector));
+ minimorphic_property_access_infos_.insert({source, access_info});
}
return access_info;
}
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index fdac46b9fd..d2bfbace26 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -10,6 +10,7 @@
#include "src/common/globals.h"
#include "src/compiler/access-info.h"
#include "src/compiler/feedback-source.h"
+#include "src/compiler/globals.h"
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
#include "src/compiler/serializer-hints.h"
@@ -103,6 +104,12 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool is_native_context_independent() const {
return is_native_context_independent_;
}
+ bool generate_full_feedback_collection() const {
+ // NCI code currently collects full feedback.
+ DCHECK_IMPLIES(is_native_context_independent(),
+ CollectFeedbackInGenericLowering());
+ return is_native_context_independent();
+ }
enum BrokerMode { kDisabled, kSerializing, kSerialized, kRetired };
BrokerMode mode() const { return mode_; }
@@ -363,7 +370,8 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ZoneUnorderedMap<PropertyAccessTarget, PropertyAccessInfo,
PropertyAccessTarget::Hash, PropertyAccessTarget::Equal>
property_access_infos_;
- ZoneUnorderedMap<int, MinimorphicLoadPropertyAccessInfo>
+ ZoneUnorderedMap<FeedbackSource, MinimorphicLoadPropertyAccessInfo,
+ FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
ZoneVector<ObjectData*> typed_array_string_tags_;
@@ -441,6 +449,27 @@ class OffHeapBytecodeArray final : public interpreter::AbstractBytecodeArray {
BytecodeArrayRef array_;
};
+// Scope that unparks the LocalHeap, if:
+// a) We have a JSHeapBroker,
+// b) Said JSHeapBroker has a LocalHeap, and
+// c) Said LocalHeap has been parked.
+// Used, for example, when printing the graph with --trace-turbo with a
+// previously parked LocalHeap.
+class UnparkedScopeIfNeeded {
+ public:
+ explicit UnparkedScopeIfNeeded(JSHeapBroker* broker) {
+ if (broker != nullptr) {
+ LocalHeap* local_heap = broker->local_heap();
+ if (local_heap != nullptr && local_heap->IsParked()) {
+ unparked_scope.emplace(local_heap);
+ }
+ }
+ }
+
+ private:
+ base::Optional<UnparkedScope> unparked_scope;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
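// A minimal usage sketch for the scope declared above (the surrounding
// function is hypothetical and only UnparkedScopeIfNeeded comes from this
// patch): constructing the scope is a no-op unless there is a broker, the
// broker has a LocalHeap, and that LocalHeap is currently parked.
void MaybePrintGraph(JSHeapBroker* broker) {
  UnparkedScopeIfNeeded scope(broker);
  // ... heap-reading work, e.g. graph printing under --trace-turbo ...
}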
diff --git a/deps/v8/src/compiler/js-heap-copy-reducer.cc b/deps/v8/src/compiler/js-heap-copy-reducer.cc
index 069d42bc34..837369ec55 100644
--- a/deps/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/deps/v8/src/compiler/js-heap-copy-reducer.cc
@@ -171,6 +171,13 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kJSLoadNamedFromSuper: {
+ // TODO(marja, v8:9237): Process feedback once it's added to the byte
+ // code.
+ NamedAccess const& p = NamedAccessOf(node->op());
+ NameRef name(broker(), p.name());
+ break;
+ }
case IrOpcode::kJSStoreNamed: {
NamedAccess const& p = NamedAccessOf(node->op());
NameRef name(broker(), p.name());
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 66e7820d65..74e9d2c012 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -470,8 +470,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
CallFrequency frequency = call.frequency();
BuildGraphFromBytecode(broker(), zone(), *shared_info, feedback_vector,
BailoutId::None(), jsgraph(), frequency,
- source_positions_, inlining_id, flags,
- &info_->tick_counter());
+ source_positions_, inlining_id, info_->code_kind(),
+ flags, &info_->tick_counter());
}
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index c5fda401dd..03ac064c4e 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -82,7 +82,7 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToLength(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
- case Runtime::kInlineToStringRT:
+ case Runtime::kInlineToString:
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 2fd99244d1..2a4524f386 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1071,13 +1071,10 @@ Reduction JSNativeContextSpecialization::ReduceMinimorphicPropertyAccess(
if (feedback.has_migration_target_maps()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
}
- effect = graph()->NewNode(
- simplified()->DynamicCheckMaps(
- flags, feedback.handler(), source,
- feedback.is_monomorphic()
- ? DynamicCheckMapsParameters::ICState::kMonomorphic
- : DynamicCheckMapsParameters::ICState::kPolymorphic),
- receiver, effect, control);
+ effect =
+ graph()->NewNode(simplified()->DynamicCheckMaps(flags, feedback.handler(),
+ feedback.map(), source),
+ receiver, effect, control);
value = access_builder.BuildMinimorphicLoadDataField(
feedback.name(), access_info, receiver, &effect, &control);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index f905505e2a..dccc9558b5 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -287,6 +287,7 @@ std::ostream& operator<<(std::ostream& os, NamedAccess const& p) {
NamedAccess const& NamedAccessOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSLoadNamed ||
+ op->opcode() == IrOpcode::kJSLoadNamedFromSuper ||
op->opcode() == IrOpcode::kJSStoreNamed);
return OpParameter<NamedAccess>(op);
}
@@ -918,6 +919,19 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
access); // parameter
}
+const Operator* JSOperatorBuilder::LoadNamedFromSuper(Handle<Name> name) {
+ static constexpr int kReceiver = 1;
+ static constexpr int kHomeObject = 1;
+ static constexpr int kArity = kReceiver + kHomeObject;
+ // TODO(marja, v8:9237): Use real feedback.
+ NamedAccess access(LanguageMode::kSloppy, name, FeedbackSource());
+ return zone()->New<Operator1<NamedAccess>>( // --
+ IrOpcode::kJSLoadNamedFromSuper, Operator::kNoProperties, // opcode
+ "JSLoadNamedFromSuper", // name
+ kArity, 1, 1, 1, 1, 2, // counts
+ access); // parameter
+}
+
const Operator* JSOperatorBuilder::LoadProperty(
FeedbackSource const& feedback) {
PropertyAccess access(LanguageMode::kSloppy, feedback);
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 1df5326ae4..4043969000 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -921,6 +921,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadProperty(FeedbackSource const& feedback);
const Operator* LoadNamed(Handle<Name> name, FeedbackSource const& feedback);
+ const Operator* LoadNamedFromSuper(Handle<Name> name);
const Operator* StoreProperty(LanguageMode language_mode,
FeedbackSource const& feedback);
@@ -1389,6 +1390,22 @@ class JSLoadNamedNode final : public JSNodeWrapperBase {
#undef INPUTS
};
+class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
+ public:
+ explicit constexpr JSLoadNamedFromSuperNode(Node* node)
+ : JSNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
+ }
+
+ const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
+
+#define INPUTS(V) \
+ V(Receiver, receiver, 0, Object) \
+ V(Object, home_object, 1, Object)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
class JSStoreNamedNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreNamedNode(Node* node) : JSNodeWrapperBase(node) {
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 499af9442c..46018225a3 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -1589,11 +1589,13 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
NodeProperties::ChangeOp(node, jsgraph->common()->Call(call_descriptor));
}
+#ifndef V8_NO_ARGUMENTS_ADAPTOR
bool NeedsArgumentAdaptorFrame(SharedFunctionInfoRef shared, int arity) {
static const int sentinel = kDontAdaptArgumentsSentinel;
const int num_decl_parms = shared.internal_formal_parameter_count();
return (num_decl_parms != arity && num_decl_parms != sentinel);
}
+#endif
} // namespace
@@ -1779,6 +1781,26 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
+#ifdef V8_NO_ARGUMENTS_ADAPTOR
+ int formal_count = shared->internal_formal_parameter_count();
+ if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
+ node->RemoveInput(n.FeedbackVectorIndex());
+ // Underapplication. Massage the arguments to match the expected number of
+ // arguments.
+ for (int i = arity; i < formal_count; i++) {
+ node->InsertInput(graph()->zone(), arity + 2,
+ jsgraph()->UndefinedConstant());
+ }
+
+ // Patch {node} to a direct call.
+ node->InsertInput(graph()->zone(), formal_count + 2, new_target);
+ node->InsertInput(graph()->zone(), formal_count + 3,
+ jsgraph()->Constant(arity));
+ NodeProperties::ChangeOp(node,
+ common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + formal_count,
+ flags | CallDescriptor::kCanUseRoots)));
+#else
if (NeedsArgumentAdaptorFrame(*shared, arity)) {
node->RemoveInput(n.FeedbackVectorIndex());
@@ -1826,6 +1848,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
}
+#endif
} else if (shared->HasBuiltinId() &&
Builtins::IsCpp(shared->builtin_id())) {
// Patch {node} to a direct CEntry call.
@@ -2406,7 +2429,16 @@ Reduction JSTypedLowering::ReduceJSResolvePromise(Node* node) {
Reduction JSTypedLowering::Reduce(Node* node) {
DisallowHeapAccess no_heap_access;
- switch (node->opcode()) {
+ const IrOpcode::Value opcode = node->opcode();
+ if (broker()->generate_full_feedback_collection() &&
+ IrOpcode::IsFeedbackCollectingOpcode(opcode)) {
+ // In NCI code, it is not valid to reduce feedback-collecting JS opcodes
+ // into non-feedback-collecting lower-level opcodes; missed feedback would
+ // result in soft deopts.
+ return NoChange();
+ }
+
+ switch (opcode) {
case IrOpcode::kJSEqual:
return ReduceJSEqual(node);
case IrOpcode::kJSStrictEqual:
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index b8c14db539..cde4b96c87 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -94,6 +94,12 @@ int CallDescriptor::GetFirstUnusedStackSlot() const {
int CallDescriptor::GetStackParameterDelta(
CallDescriptor const* tail_caller) const {
+ // In the IsTailCallForTierUp case, the callee has
+ // identical linkage and runtime arguments to the caller, thus the stack
+ // parameter delta is 0. We don't explicitly pass the runtime arguments as
+ // inputs to the TailCall node, since they already exist on the stack.
+ if (IsTailCallForTierUp()) return 0;
+
int callee_slots_above_sp = GetFirstUnusedStackSlot();
int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
@@ -103,6 +109,7 @@ int CallDescriptor::GetStackParameterDelta(
// of padding.
++stack_param_delta;
} else {
+ DCHECK_NE(tail_caller_slots_above_sp % 2, 0);
// The delta is odd because of the caller. We already have one slot of
// padding that we can reuse for arguments, so we will need one fewer
// slot.
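// Worked example of the padding adjustment above (values are illustrative
// only, not taken from the patch):
//   callee 5 slots, tail caller 2 slots: delta 3 is odd because of the
//   callee, so a padding slot is added and the effective delta becomes 4;
//   callee 4 slots, tail caller 3 slots: delta 1 is odd because of the
//   caller, whose existing padding slot is reused, so the delta becomes 0.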
@@ -147,9 +154,7 @@ bool CallDescriptor::CanTailCall(const CallDescriptor* callee) const {
int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
switch (kind_) {
case kCallJSFunction:
- return PushArgumentCount()
- ? OptimizedBuiltinFrameConstants::kFixedSlotCount
- : StandardFrameConstants::kFixedSlotCount;
+ return StandardFrameConstants::kFixedSlotCount;
case kCallAddress:
if (code_kind == CodeKind::C_WASM_ENTRY) {
return CWasmEntryFrameConstants::kFixedSlotCount;
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index e14f9cd8a4..d96b049d92 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -205,6 +205,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kCallBuiltinPointer, // target is a builtin pointer
};
+ // NOTE: The lowest 10 bits of the Flags field are encoded in InstructionCode
+ // (for use in the code generator). All higher bits are lost.
+ static constexpr int kFlagsBitsEncodedInInstructionCode = 10;
enum Flag {
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
@@ -214,17 +217,36 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
- // Push argument count as part of function prologue.
- kPushArgumentCount = 1u << 5,
// Use retpoline for this call if indirect.
- kRetpoline = 1u << 6,
+ kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 7,
- kCallerSavedRegisters = 1u << 8,
+ kFixedTargetRegister = 1u << 6,
+ kCallerSavedRegisters = 1u << 7,
// The kCallerSavedFPRegisters only matters (and set) when the more general
// flag for kCallerSavedRegisters above is also set.
- kCallerSavedFPRegisters = 1u << 9,
+ kCallerSavedFPRegisters = 1u << 8,
+ // Tail calls for tier up are special (in fact they are different enough
+ // from normal tail calls to warrant a dedicated opcode; but they also have
+ // enough similar aspects that reusing the TailCall opcode is pragmatic).
+ // Specifically:
+ //
+ // 1. Caller and callee are both JS-linkage Code objects.
+ // 2. JS runtime arguments are passed unchanged from caller to callee.
+ // 3. JS runtime arguments are not attached as inputs to the TailCall node.
+ // 4. Prior to the tail call, frame and register state is torn down to just
+ // before the caller frame was constructed.
+ // 5. Unlike normal tail calls, arguments adaptor frames (if present) are
+ // *not* torn down.
+ //
+ // In other words, the behavior is identical to that of a jmp instruction
+ // just prior to caller frame construction.
+ kIsTailCallForTierUp = 1u << 9,
+
+ // Flags past here are *not* encoded in InstructionCode and are thus not
+ // accessible from the code generator. See also
+ // kFlagsBitsEncodedInInstructionCode.
+
// AIX has a function descriptor by default but it can be disabled for a
// certain CFunction call (only used for Kind::kCallAddress).
kNoFunctionDescriptor = 1u << 10,
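// Not part of the patch: a sanity check one could add to ensure the new flag
// stays within the low bits that survive the InstructionCode encoding
// described above (kIsTailCallForTierUp is 1u << 9, the limit is 10 bits).
static_assert(CallDescriptor::kIsTailCallForTierUp <
                  (1u << CallDescriptor::kFlagsBitsEncodedInInstructionCode),
              "kIsTailCallForTierUp must be visible to the code generator");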
@@ -317,7 +339,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
return flags() & kInitializeRootRegister;
}
@@ -327,6 +348,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool NeedsCallerSavedFPRegisters() const {
return flags() & kCallerSavedFPRegisters;
}
+ bool IsTailCallForTierUp() const { return flags() & kIsTailCallForTierUp; }
bool NoFunctionDescriptor() const { return flags() & kNoFunctionDescriptor; }
LinkageLocation GetReturnLocation(size_t index) const {
@@ -511,22 +533,22 @@ class V8_EXPORT_PRIVATE Linkage : public NON_EXPORTED_BASE(ZoneObject) {
}
// A special {Parameter} index for JSCalls that represents the new target.
- static int GetJSCallNewTargetParamIndex(int parameter_count) {
+ static constexpr int GetJSCallNewTargetParamIndex(int parameter_count) {
return parameter_count + 0; // Parameter (arity + 0) is special.
}
// A special {Parameter} index for JSCalls that represents the argument count.
- static int GetJSCallArgCountParamIndex(int parameter_count) {
+ static constexpr int GetJSCallArgCountParamIndex(int parameter_count) {
return parameter_count + 1; // Parameter (arity + 1) is special.
}
// A special {Parameter} index for JSCalls that represents the context.
- static int GetJSCallContextParamIndex(int parameter_count) {
+ static constexpr int GetJSCallContextParamIndex(int parameter_count) {
return parameter_count + 2; // Parameter (arity + 2) is special.
}
// A special {Parameter} index for JSCalls that represents the closure.
- static const int kJSCallClosureParamIndex = -1;
+ static constexpr int kJSCallClosureParamIndex = -1;
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 740a4f293c..55f39d76e8 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -1659,7 +1659,7 @@ struct BitfieldCheck {
Uint32BinopMatcher eq(node);
if (eq.left().IsWord32And()) {
Uint32BinopMatcher mand(eq.left().node());
- if (mand.right().HasValue()) {
+ if (mand.right().HasValue() && eq.right().HasValue()) {
BitfieldCheck result{mand.left().node(), mand.right().Value(),
eq.right().Value(), false};
if (mand.left().IsTruncateInt64ToInt32()) {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 644c445b94..98befab060 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -495,7 +495,7 @@ ShiftKind ShiftKindOf(Operator const* op) {
V(V16x8AllTrue, Operator::kNoProperties, 1, 0, 1) \
V(V8x16AnyTrue, Operator::kNoProperties, 1, 0, 1) \
V(V8x16AllTrue, Operator::kNoProperties, 1, 0, 1) \
- V(S8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
+ V(I8x16Swizzle, Operator::kNoProperties, 2, 0, 1)
// The format is:
// V(Name, properties, value_input_count, control_input_count, output_count)
@@ -1575,7 +1575,7 @@ std::ostream& operator<<(std::ostream& os, S128ImmediateParameter const& p) {
}
S128ImmediateParameter const& S128ImmediateParameterOf(Operator const* op) {
- DCHECK(IrOpcode::kS8x16Shuffle == op->opcode() ||
+ DCHECK(IrOpcode::kI8x16Shuffle == op->opcode() ||
IrOpcode::kS128Const == op->opcode());
return OpParameter<S128ImmediateParameter>(op);
}
@@ -1586,10 +1586,10 @@ const Operator* MachineOperatorBuilder::S128Const(const uint8_t value[16]) {
S128ImmediateParameter(value));
}
-const Operator* MachineOperatorBuilder::S8x16Shuffle(
+const Operator* MachineOperatorBuilder::I8x16Shuffle(
const uint8_t shuffle[16]) {
return zone_->New<Operator1<S128ImmediateParameter>>(
- IrOpcode::kS8x16Shuffle, Operator::kPure, "Shuffle", 2, 0, 0, 1, 0, 0,
+ IrOpcode::kI8x16Shuffle, Operator::kPure, "Shuffle", 2, 0, 0, 1, 0, 0,
S128ImmediateParameter(shuffle));
}
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 5f7c900466..702c050223 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -197,8 +197,6 @@ ShiftKind ShiftKindOf(Operator const*) V8_WARN_UNUSED_RESULT;
// makes it easier to detect an overflow. This parameter is ignored on platforms
// like x64 and ia32 where a range overflow does not result in INT_MAX.
enum class TruncateKind { kArchitectureDefault, kSetOverflowToMin };
-std::ostream& operator<<(std::ostream& os, TruncateKind kind);
-size_t hash_value(TruncateKind kind);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
@@ -767,8 +765,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Select();
const Operator* S128AndNot();
- const Operator* S8x16Swizzle();
- const Operator* S8x16Shuffle(const uint8_t shuffle[16]);
+ const Operator* I8x16Swizzle();
+ const Operator* I8x16Shuffle(const uint8_t shuffle[16]);
const Operator* V64x2AnyTrue();
const Operator* V64x2AllTrue();
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 0aba619ffe..f1faeec936 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -170,6 +170,7 @@
JS_CREATE_OP_LIST(V) \
V(JSLoadProperty) \
V(JSLoadNamed) \
+ V(JSLoadNamedFromSuper) \
V(JSLoadGlobal) \
V(JSStoreProperty) \
V(JSStoreNamed) \
@@ -482,6 +483,7 @@
V(StringToLowerCaseIntl) \
V(StringToNumber) \
V(StringToUpperCaseIntl) \
+ V(TierUpCheck) \
V(ToBoolean) \
V(TransitionAndStoreElement) \
V(TransitionAndStoreNonNumberElement) \
@@ -950,8 +952,8 @@
V(S128Xor) \
V(S128Select) \
V(S128AndNot) \
- V(S8x16Swizzle) \
- V(S8x16Shuffle) \
+ V(I8x16Swizzle) \
+ V(I8x16Shuffle) \
V(V64x2AnyTrue) \
V(V64x2AllTrue) \
V(V32x4AnyTrue) \
@@ -1068,6 +1070,55 @@ class V8_EXPORT_PRIVATE IrOpcode {
static bool IsContextChainExtendingOpcode(Value value) {
return kJSCreateFunctionContext <= value && value <= kJSCreateBlockContext;
}
+
+ // These opcodes take the feedback vector as an input and implement
+ // feedback-collecting logic in generic lowering.
+ static bool IsFeedbackCollectingOpcode(Value value) {
+#define CASE(Name, ...) \
+ case k##Name: \
+ return true;
+ switch (value) {
+ JS_ARITH_BINOP_LIST(CASE)
+ JS_ARITH_UNOP_LIST(CASE)
+ JS_BITWISE_BINOP_LIST(CASE)
+ JS_BITWISE_UNOP_LIST(CASE)
+ JS_COMPARE_BINOP_LIST(CASE)
+ case kJSCall:
+ case kJSCallWithArrayLike:
+ case kJSCallWithSpread:
+ case kJSCloneObject:
+ case kJSConstruct:
+ case kJSConstructWithArrayLike:
+ case kJSConstructWithSpread:
+ case kJSCreateEmptyLiteralArray:
+ case kJSCreateLiteralArray:
+ case kJSCreateLiteralObject:
+ case kJSCreateLiteralRegExp:
+ case kJSGetIterator:
+ case kJSGetTemplateObject:
+ case kJSHasProperty:
+ case kJSInstanceOf:
+ case kJSLoadGlobal:
+ case kJSLoadNamed:
+ case kJSLoadProperty:
+ case kJSStoreDataPropertyInLiteral:
+ case kJSStoreGlobal:
+ case kJSStoreInArrayLiteral:
+ case kJSStoreNamed:
+ case kJSStoreNamedOwn:
+ case kJSStoreProperty:
+ return true;
+ default:
+ return false;
+ }
+#undef CASE
+ UNREACHABLE();
+ }
+
+ static bool IsFeedbackCollectingOpcode(int16_t value) {
+ DCHECK(0 <= value && value <= kLast);
+ return IsFeedbackCollectingOpcode(static_cast<IrOpcode::Value>(value));
+ }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index c7851dd263..c77249f621 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -80,6 +80,7 @@ bool OperatorProperties::NeedsExactContext(const Operator* op) {
case IrOpcode::kJSLoadContext:
case IrOpcode::kJSLoadModule:
case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadNamedFromSuper:
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSStoreContext:
case IrOpcode::kJSStoreDataPropertyInLiteral:
@@ -193,6 +194,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Property access operations
case IrOpcode::kJSLoadNamed:
+ case IrOpcode::kJSLoadNamedFromSuper:
case IrOpcode::kJSStoreNamed:
case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSStoreProperty:
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 8a1c86ee49..7b99d07b6b 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -22,7 +22,6 @@
#include "src/compiler/backend/instruction-selector.h"
#include "src/compiler/backend/instruction.h"
#include "src/compiler/backend/jump-threading.h"
-#include "src/compiler/backend/live-range-separator.h"
#include "src/compiler/backend/mid-tier-register-allocator.h"
#include "src/compiler/backend/move-optimizer.h"
#include "src/compiler/backend/register-allocator-verifier.h"
@@ -784,27 +783,6 @@ class LocalHeapScope {
OptimizedCompilationInfo* info_;
};
-// Scope that unparks the LocalHeap, if:
-// a) We have a JSHeapBroker,
-// b) Said JSHeapBroker has a LocalHeap, and
-// c) Said LocalHeap has been parked.
-// Used, for example, when printing the graph with --trace-turbo with a
-// previously parked LocalHeap.
-class UnparkedScopeIfNeeded {
- public:
- explicit UnparkedScopeIfNeeded(JSHeapBroker* broker) {
- if (broker != nullptr) {
- LocalHeap* local_heap = broker->local_heap();
- if (local_heap != nullptr && local_heap->IsParked()) {
- unparked_scope.emplace(local_heap);
- }
- }
- }
-
- private:
- base::Optional<UnparkedScope> unparked_scope;
-};
-
void PrintFunctionSource(OptimizedCompilationInfo* info, Isolate* isolate,
int source_id, Handle<SharedFunctionInfo> shared) {
if (!shared->script().IsUndefined(isolate)) {
@@ -881,7 +859,8 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
const bool print_code =
FLAG_print_code ||
(info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
+ (info->IsNativeContextIndependent() && FLAG_print_nci_code);
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
@@ -1422,17 +1401,14 @@ struct GraphBuilderPhase {
if (data->info()->bailout_on_uninitialized()) {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
- if (data->info()->IsNativeContextIndependent()) {
- flags |= BytecodeGraphBuilderFlag::kNativeContextIndependent;
- }
JSFunctionRef closure(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
data->broker(), temp_zone, closure.shared(), closure.feedback_vector(),
data->info()->osr_offset(), data->jsgraph(), frequency,
- data->source_positions(), SourcePosition::kNotInlined, flags,
- &data->info()->tick_counter());
+ data->source_positions(), SourcePosition::kNotInlined,
+ data->info()->code_kind(), flags, &data->info()->tick_counter());
}
};
@@ -1442,7 +1418,7 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
OptimizedCompilationInfo* info = data->info();
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
- data->jsgraph()->Dead());
+ data->broker(), data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
CheckpointElimination checkpoint_elimination(&graph_reducer);
@@ -1511,6 +1487,9 @@ struct TyperPhase {
LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
data->common(), temp_zone);
if (FLAG_turbo_loop_variable) induction_vars.Run();
+
+ // The typer inspects heap objects, so we need to unpark the local heap.
+ UnparkedScopeIfNeeded scope(data->broker());
typer->Run(roots, &induction_vars);
}
};
@@ -1538,7 +1517,7 @@ struct UntyperPhase {
}
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
RemoveTypeReducer remove_type_reducer;
AddReducer(data, &graph_reducer, &remove_type_reducer);
@@ -1559,7 +1538,7 @@ struct CopyMetadataForConcurrentCompilePhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
JSHeapCopyReducer heap_copy_reducer(data->broker());
AddReducer(data, &graph_reducer, &heap_copy_reducer);
@@ -1605,7 +1584,7 @@ struct TypedLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1625,6 +1604,7 @@ struct TypedLoweringPhase {
data->broker(), data->common(),
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
+
if (!data->info()->IsNativeContextIndependent()) {
AddReducer(data, &graph_reducer, &create_lowering);
}
@@ -1634,8 +1614,11 @@ struct TypedLoweringPhase {
AddReducer(data, &graph_reducer, &simple_reducer);
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
- // JSCreateLowering accesses the heap and therefore we need to unpark it.
+
+ // ConstantFoldingReducer, JSCreateLowering, JSTypedLowering, and
+ // TypedOptimization access the heap.
UnparkedScopeIfNeeded scope(data->broker());
+
graph_reducer.ReduceGraph();
}
};
@@ -1648,13 +1631,19 @@ struct EscapeAnalysisPhase {
EscapeAnalysis escape_analysis(data->jsgraph(),
&data->info()->tick_counter(), temp_zone);
escape_analysis.ReduceGraph();
+
GraphReducer reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
EscapeAnalysisReducer escape_reducer(&reducer, data->jsgraph(),
escape_analysis.analysis_result(),
temp_zone);
+
AddReducer(data, &reducer, &escape_reducer);
+
+ // EscapeAnalysisReducer accesses the heap.
+ UnparkedScopeIfNeeded scope(data->broker());
+
reducer.ReduceGraph();
// TODO(tebbi): Turn this into a debug mode check once we have confidence.
escape_reducer.VerifyReplacement();
@@ -1666,7 +1655,7 @@ struct TypeAssertionsPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
AddTypeAssertionsReducer type_assertions(&graph_reducer, data->jsgraph(),
temp_zone);
@@ -1683,6 +1672,10 @@ struct SimplifiedLoweringPhase {
data->source_positions(), data->node_origins(),
data->info()->GetPoisoningMitigationLevel(),
&data->info()->tick_counter());
+
+ // RepresentationChanger accesses the heap.
+ UnparkedScopeIfNeeded scope(data->broker());
+
lowering.LowerAllNodes();
}
};
@@ -1717,7 +1710,7 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
JSGenericLowering generic_lowering(data->jsgraph(), &graph_reducer,
data->broker());
@@ -1731,7 +1724,7 @@ struct EarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1808,7 +1801,7 @@ struct EffectControlLinearizationPhase {
// doing a common operator reducer and dead code elimination just before
// it, to eliminate conditional deopts with a constant condition.
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
@@ -1841,7 +1834,7 @@ struct LoadEliminationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone,
@@ -1862,6 +1855,7 @@ struct LoadEliminationPhase {
&graph_reducer, data->jsgraph(), data->broker());
TypeNarrowingReducer type_narrowing_reducer(&graph_reducer, data->jsgraph(),
data->broker());
+
AddReducer(data, &graph_reducer, &branch_condition_elimination);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &redundancy_elimination);
@@ -1872,6 +1866,10 @@ struct LoadEliminationPhase {
AddReducer(data, &graph_reducer, &checkpoint_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &value_numbering);
+
+ // ConstantFoldingReducer and TypedOptimization access the heap.
+ UnparkedScopeIfNeeded scope(data->broker());
+
graph_reducer.ReduceGraph();
}
};
@@ -1902,7 +1900,7 @@ struct LateOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -1930,7 +1928,7 @@ struct MachineOperatorOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
@@ -2001,7 +1999,7 @@ struct CsaEarlyOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
BranchElimination branch_condition_elimination(&graph_reducer,
@@ -2029,7 +2027,7 @@ struct CsaOptimizationPhase {
void Run(PipelineData* data, Zone* temp_zone) {
GraphReducer graph_reducer(temp_zone, data->graph(),
- &data->info()->tick_counter(),
+ &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
@@ -2200,17 +2198,6 @@ struct BuildBundlesPhase {
}
};
-struct SplinterLiveRangesPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(SplinterLiveRanges)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- LiveRangeSeparator live_range_splinterer(
- data->top_tier_register_allocation_data(), temp_zone);
- live_range_splinterer.Splinter();
- }
-};
-
-
template <typename RegAllocator>
struct AllocateGeneralRegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateGeneralRegisters)
@@ -2233,18 +2220,6 @@ struct AllocateFPRegistersPhase {
}
};
-
-struct MergeSplintersPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(MergeSplinteredRanges)
-
- void Run(PipelineData* pipeline_data, Zone* temp_zone) {
- TopTierRegisterAllocationData* data =
- pipeline_data->top_tier_register_allocation_data();
- LiveRangeMerger live_range_merger(data, temp_zone);
- live_range_merger.Merge();
- }
-};
-
struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
@@ -2888,15 +2863,12 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
info.profiler_data()->SetHash(graph_hash_before_scheduling);
}
- Handle<Code> code;
if (jump_opt.is_optimizable()) {
jump_opt.set_optimizing();
- code = pipeline.GenerateCode(call_descriptor).ToHandleChecked();
+ return pipeline.GenerateCode(call_descriptor);
} else {
- code = second_pipeline.FinalizeCode().ToHandleChecked();
+ return second_pipeline.FinalizeCode();
}
-
- return code;
}
struct BlockStartsAsJSON {
@@ -3164,7 +3136,7 @@ void Pipeline::GenerateCodeForWasmFunction(
PipelineRunScope scope(&data, "V8.WasmFullOptimization",
RuntimeCallCounterId::kOptimizeWasmFullOptimization);
GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(),
+ &data.info()->tick_counter(), data.broker(),
data.mcgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data.graph(),
data.common(), scope.zone());
@@ -3184,7 +3156,7 @@ void Pipeline::GenerateCodeForWasmFunction(
PipelineRunScope scope(&data, "V8.OptimizeWasmBaseOptimization",
RuntimeCallCounterId::kOptimizeWasmBaseOptimization);
GraphReducer graph_reducer(scope.zone(), data.graph(),
- &data.info()->tick_counter(),
+ &data.info()->tick_counter(), data.broker(),
data.mcgraph()->Dead());
ValueNumberingReducer value_numbering(scope.zone(), data.graph()->zone());
AddReducer(&data, &graph_reducer, &value_numbering);
@@ -3630,12 +3602,6 @@ void PipelineImpl::AllocateRegistersForTopTier(
#endif
RegisterAllocationFlags flags;
- if (data->info()->turbo_control_flow_aware_allocation()) {
- flags |= RegisterAllocationFlag::kTurboControlFlowAwareAllocation;
- }
- if (data->info()->turbo_preprocess_ranges()) {
- flags |= RegisterAllocationFlag::kTurboPreprocessRanges;
- }
if (data->info()->trace_turbo_allocation()) {
flags |= RegisterAllocationFlag::kTraceAllocation;
}
@@ -3661,25 +3627,12 @@ void PipelineImpl::AllocateRegistersForTopTier(
"PreAllocation", data->top_tier_register_allocation_data());
}
- if (info()->turbo_preprocess_ranges()) {
- Run<SplinterLiveRangesPhase>();
- if (info()->trace_turbo_json() && !data->MayHaveUnverifiableGraph()) {
- TurboCfgFile tcf(isolate());
- tcf << AsC1VRegisterAllocationData(
- "PostSplinter", data->top_tier_register_allocation_data());
- }
- }
-
Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
if (data->sequence()->HasFPVirtualRegisters()) {
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
- if (info()->turbo_preprocess_ranges()) {
- Run<MergeSplintersPhase>();
- }
-
Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
diff --git a/deps/v8/src/compiler/processed-feedback.h b/deps/v8/src/compiler/processed-feedback.h
index cd916f9b47..282923e0c3 100644
--- a/deps/v8/src/compiler/processed-feedback.h
+++ b/deps/v8/src/compiler/processed-feedback.h
@@ -177,19 +177,20 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
public:
MinimorphicLoadPropertyAccessFeedback(NameRef const& name,
FeedbackSlotKind slot_kind,
- bool is_monomorphic,
Handle<Object> handler,
+ MaybeHandle<Map> maybe_map,
bool has_migration_target_maps);
NameRef const& name() const { return name_; }
- bool is_monomorphic() const { return is_monomorphic_; }
+ bool is_monomorphic() const { return !maybe_map_.is_null(); }
Handle<Object> handler() const { return handler_; }
+ MaybeHandle<Map> map() const { return maybe_map_; }
bool has_migration_target_maps() const { return has_migration_target_maps_; }
private:
NameRef const name_;
- bool const is_monomorphic_;
Handle<Object> const handler_;
+ MaybeHandle<Map> const maybe_map_;
bool const has_migration_target_maps_;
};
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index dc25326735..4235160037 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -199,17 +199,49 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
MachineRepresentation::kFloat64) {
bool const is_heapnumber = !is_inobject || !FLAG_unbox_double_fields;
if (is_heapnumber) {
- FieldAccess const storage_access = {kTaggedBase,
- field_access.offset,
- name.object(),
- MaybeHandle<Map>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier,
- LoadSensitivity::kCritical,
- field_access.const_field_info};
- storage = *effect = graph()->NewNode(
- simplified()->LoadField(storage_access), storage, *effect, *control);
+ if (dependencies() == nullptr) {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect =
+ graph()->NewNode(simplified()->LoadField(storage_access), storage,
+ *effect, *control);
+ // We expect the loaded value to be a heap number here. With
+ // in-place field representation changes, it is possible that this is
+ // no longer a heap number, even without a map transition. If we haven't
+ // taken a dependency on the field representation, we should verify that
+ // the loaded value is a heap number.
+ storage = *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ storage, *effect, *control);
+ Node* map = *effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ storage, *effect, *control);
+ Node* is_heap_number =
+ graph()->NewNode(simplified()->ReferenceEqual(), map,
+ jsgraph()->HeapNumberMapConstant());
+ *effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kNotAHeapNumber),
+ is_heap_number, *effect, *control);
+ } else {
+ FieldAccess const storage_access = {kTaggedBase,
+ field_access.offset,
+ name.object(),
+ MaybeHandle<Map>(),
+ Type::OtherInternal(),
+ MachineType::TaggedPointer(),
+ kPointerWriteBarrier,
+ LoadSensitivity::kCritical,
+ field_access.const_field_info};
+ storage = *effect =
+ graph()->NewNode(simplified()->LoadField(storage_access), storage,
+ *effect, *control);
+ }
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
diff --git a/deps/v8/src/compiler/scheduled-machine-lowering.cc b/deps/v8/src/compiler/scheduled-machine-lowering.cc
index 9395c83e57..903052be1d 100644
--- a/deps/v8/src/compiler/scheduled-machine-lowering.cc
+++ b/deps/v8/src/compiler/scheduled-machine-lowering.cc
@@ -19,7 +19,7 @@ ScheduledMachineLowering::ScheduledMachineLowering(
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
PoisoningMitigationLevel poison_level)
: schedule_(schedule),
- graph_assembler_(js_graph, temp_zone, schedule),
+ graph_assembler_(js_graph, temp_zone, base::nullopt, schedule),
select_lowering_(&graph_assembler_, js_graph->graph()),
memory_lowering_(js_graph, temp_zone, &graph_assembler_, poison_level),
reducers_({&select_lowering_, &memory_lowering_}, temp_zone),
diff --git a/deps/v8/src/compiler/serializer-for-background-compilation.cc b/deps/v8/src/compiler/serializer-for-background-compilation.cc
index a828010ee1..83b88cd3bf 100644
--- a/deps/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/deps/v8/src/compiler/serializer-for-background-compilation.cc
@@ -198,6 +198,7 @@ namespace compiler {
V(LdaLookupSlot) \
V(LdaLookupSlotInsideTypeof) \
V(LdaNamedProperty) \
+ V(LdaNamedPropertyFromSuper) \
V(LdaNamedPropertyNoFeedback) \
V(LdaNull) \
V(Ldar) \
@@ -3260,6 +3261,13 @@ void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad);
}
+void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
+ BytecodeArrayIterator* iterator) {
+ NameRef(broker(),
+ iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ // TODO(marja, v8:9237): Process feedback once it's added to the byte code.
+}
+
// TODO(neis): Do feedback-independent serialization also for *NoFeedback
// bytecodes.
void SerializerForBackgroundCompilation::VisitLdaNamedPropertyNoFeedback(
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index a875cfada3..f832107939 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -5,6 +5,7 @@
#include "src/compiler/simd-scalar-lowering.h"
#include "src/codegen/machine-type.h"
+#include "src/common/globals.h"
#include "src/compiler/diamond.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
@@ -29,6 +30,7 @@ static const int32_t kShift8 = 24;
static const int32_t kShiftMask8 = 0x7;
static const int32_t kShiftMask16 = 0xF;
static const int32_t kShiftMask32 = 0x1F;
+static const int32_t kShiftMask64 = 0x3F;
// Shift values are taken modulo lane size. This helper calculates the mask
// required for different shift opcodes.
@@ -46,6 +48,10 @@ int GetMaskForShift(Node* node) {
case IrOpcode::kI32x4ShrS:
case IrOpcode::kI32x4ShrU:
return kShiftMask32;
+ case IrOpcode::kI64x2Shl:
+ case IrOpcode::kI64x2ShrS:
+ case IrOpcode::kI64x2ShrU:
+ return kShiftMask64;
default:
UNIMPLEMENTED();
}
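// Illustrative check only (not part of the patch): shift amounts wrap at the
// lane width, so shifting a 64-bit lane by 70 behaves like shifting by 6.
static_assert((70 & kShiftMask64) == 6, "shift counts are taken mod 64");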
@@ -103,7 +109,17 @@ void SimdScalarLowering::LowerGraph() {
}
}
-#define FOREACH_INT64X2_OPCODE(V) V(I64x2Splat)
+#define FOREACH_INT64X2_OPCODE(V) \
+ V(I64x2Splat) \
+ V(I64x2ExtractLane) \
+ V(I64x2ReplaceLane) \
+ V(I64x2Neg) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I64x2Add) \
+ V(I64x2Sub) \
+ V(I64x2Mul)
#define FOREACH_INT32X4_OPCODE(V) \
V(I32x4Splat) \
@@ -128,15 +144,15 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4LtS) \
- V(I32x4LeS) \
+ V(I32x4LtU) \
V(I32x4GtS) \
+ V(I32x4GtU) \
+ V(I32x4LeS) \
+ V(I32x4LeU) \
V(I32x4GeS) \
+ V(I32x4GeU) \
V(I32x4UConvertI16x8Low) \
V(I32x4UConvertI16x8High) \
- V(I32x4LtU) \
- V(I32x4LeU) \
- V(I32x4GtU) \
- V(I32x4GeU) \
V(I32x4Abs) \
V(S128And) \
V(S128Or) \
@@ -150,7 +166,19 @@ void SimdScalarLowering::LowerGraph() {
V(V8x16AllTrue) \
V(I32x4BitMask)
-#define FOREACH_FLOAT64X2_OPCODE(V) V(F64x2Splat)
+#define FOREACH_FLOAT64X2_OPCODE(V) \
+ V(F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(F64x2Abs) \
+ V(F64x2Neg) \
+ V(F64x2Sqrt) \
+ V(F64x2Add) \
+ V(F64x2Sub) \
+ V(F64x2Mul) \
+ V(F64x2Div) \
+ V(F64x2Min) \
+ V(F64x2Max)
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(F32x4Splat) \
@@ -171,6 +199,12 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Min) \
V(F32x4Max)
+#define FOREACH_FLOAT64x2_TO_INT64x2OPCODE(V) \
+ V(F64x2Eq) \
+ V(F64x2Ne) \
+ V(F64x2Lt) \
+ V(F64x2Le)
+
#define FOREACH_FLOAT32X4_TO_INT32X4OPCODE(V) \
V(F32x4Eq) \
V(F32x4Ne) \
@@ -209,9 +243,13 @@ void SimdScalarLowering::LowerGraph() {
V(I16x8Eq) \
V(I16x8Ne) \
V(I16x8LtS) \
- V(I16x8LeS) \
V(I16x8LtU) \
+ V(I16x8GtS) \
+ V(I16x8GtU) \
+ V(I16x8LeS) \
V(I16x8LeU) \
+ V(I16x8GeS) \
+ V(I16x8GeU) \
V(I16x8RoundingAverageU) \
V(I16x8Abs) \
V(I16x8BitMask)
@@ -241,11 +279,15 @@ void SimdScalarLowering::LowerGraph() {
V(I8x16Eq) \
V(I8x16Ne) \
V(I8x16LtS) \
- V(I8x16LeS) \
V(I8x16LtU) \
+ V(I8x16GtS) \
+ V(I8x16GtU) \
+ V(I8x16LeS) \
V(I8x16LeU) \
- V(S8x16Swizzle) \
- V(S8x16Shuffle) \
+ V(I8x16GeS) \
+ V(I8x16GeU) \
+ V(I8x16Swizzle) \
+ V(I8x16Shuffle) \
V(I8x16RoundingAverageU) \
V(I8x16Abs) \
V(I8x16BitMask)
@@ -294,6 +336,10 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
+ FOREACH_FLOAT64x2_TO_INT64x2OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
+ }
FOREACH_INT16X8_OPCODE(CASE_STMT) {
replacements_[node->id()].type = SimdType::kInt16x8;
break;
@@ -314,6 +360,9 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case LoadTransformation::kS32x4LoadSplat:
replacements_[node->id()].type = SimdType::kInt32x4;
break;
+ case LoadTransformation::kS64x2LoadSplat:
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
case LoadTransformation::kI16x8Load8x8S:
case LoadTransformation::kI16x8Load8x8U:
replacements_[node->id()].type = SimdType::kInt16x8;
@@ -322,6 +371,10 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case LoadTransformation::kI32x4Load16x4U:
replacements_[node->id()].type = SimdType::kInt32x4;
break;
+ case LoadTransformation::kI64x2Load32x2S:
+ case LoadTransformation::kI64x2Load32x2U:
+ replacements_[node->id()].type = SimdType::kInt64x2;
+ break;
default:
UNIMPLEMENTED();
}
@@ -526,9 +579,18 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
load_rep = MachineType::Uint16();
load_type = SimdType::kInt16x8;
break;
+ case LoadTransformation::kI64x2Load32x2S:
+ load_rep = MachineType::Int32();
+ load_type = SimdType::kInt32x4;
+ break;
+ case LoadTransformation::kI64x2Load32x2U:
+ load_rep = MachineType::Uint32();
+ load_type = SimdType::kInt32x4;
+ break;
case LoadTransformation::kS8x16LoadSplat:
case LoadTransformation::kS16x8LoadSplat:
case LoadTransformation::kS32x4LoadSplat:
+ case LoadTransformation::kS64x2LoadSplat:
load_rep = MachineTypeFrom(type);
break;
default:
@@ -569,6 +631,15 @@ void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
control_input);
effect_input = rep_nodes[i];
+
+ // Load operations are Word32 nodes, change them to Word64.
+ if (params.transformation == LoadTransformation::kI64x2Load32x2S) {
+ rep_nodes[i] =
+ graph()->NewNode(machine()->ChangeInt32ToInt64(), rep_nodes[i]);
+ } else if (params.transformation == LoadTransformation::kI64x2Load32x2U) {
+ rep_nodes[i] =
+ graph()->NewNode(machine()->ChangeUint32ToUint64(), rep_nodes[i]);
+ }
}
} else {
// Load splat, load from the same index for every lane.
@@ -687,15 +758,8 @@ void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
} else {
cmp_result = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
- Diamond d_cmp(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), cmp_result,
- mcgraph_->Int32Constant(0)));
- MachineRepresentation rep =
- (input_rep_type == SimdType::kFloat32x4)
- ? MachineRepresentation::kWord32
- : MachineTypeFrom(input_rep_type).representation();
- rep_node[i] =
- d_cmp.Phi(rep, mcgraph_->Int32Constant(0), mcgraph_->Int32Constant(-1));
+ Diamond d_cmp(graph(), common(), cmp_result);
+ rep_node[i] = ConstructPhiForComparison(d_cmp, input_rep_type, -1, 0);
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -999,6 +1063,14 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
break;
+ case IrOpcode::kI64x2ShrU:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word64Shr(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI64x2Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word64Shl(), rep_node[i], shift_node);
+ break;
case IrOpcode::kI32x4Shl:
rep_node[i] =
graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
@@ -1013,6 +1085,10 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
rep_node[i] = FixUpperBits(rep_node[i], kShift8);
break;
+ case IrOpcode::kI64x2ShrS:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word64Sar(), rep_node[i], shift_node);
+ break;
case IrOpcode::kI32x4ShrS:
case IrOpcode::kI16x8ShrS:
case IrOpcode::kI8x16ShrS:
@@ -1026,6 +1102,26 @@ void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
ReplaceNode(node, rep_node, num_lanes);
}
+Node* SimdScalarLowering::ConstructPhiForComparison(Diamond d,
+ SimdType rep_type,
+ int true_value,
+ int false_value) {
+ // Close the given Diamond d using a Phi node, taking care of constructing the
+ // right kind of constants (Int32 or Int64) based on rep_type.
+ if (rep_type == SimdType::kFloat64x2) {
+ MachineRepresentation rep = MachineRepresentation::kWord64;
+ return d.Phi(rep, mcgraph_->Int64Constant(true_value),
+ mcgraph_->Int64Constant(false_value));
+ } else {
+ MachineRepresentation rep =
+ (rep_type == SimdType::kFloat32x4)
+ ? MachineRepresentation::kWord32
+ : MachineTypeFrom(rep_type).representation();
+ return d.Phi(rep, mcgraph_->Int32Constant(true_value),
+ mcgraph_->Int32Constant(false_value));
+ }
+}
+
void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
const Operator* op) {
DCHECK_EQ(2, node->InputCount());
@@ -1036,12 +1132,7 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
- MachineRepresentation rep =
- (input_rep_type == SimdType::kFloat32x4)
- ? MachineRepresentation::kWord32
- : MachineTypeFrom(input_rep_type).representation();
- rep_node[i] =
- d.Phi(rep, mcgraph_->Int32Constant(0), mcgraph_->Int32Constant(-1));
+ rep_node[i] = ConstructPhiForComparison(d, input_rep_type, 0, -1);
}
ReplaceNode(node, rep_node, num_lanes);
}
@@ -1084,21 +1175,99 @@ void SimdScalarLowering::LowerBitMaskOp(Node* node, SimdType rep_type,
ReplaceNode(node, rep_node, 1);
}
+void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
+ // AllTrue ops require the input to be of a particular SimdType, but the op
+ // itself is always replaced by a single Int32x4 node.
+ int num_lanes = NumLanes(rep_type);
+ DCHECK_EQ(1, node->InputCount());
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node* zero = mcgraph_->Int32Constant(0);
+ Node* tmp_result = mcgraph_->Int32Constant(1);
+ for (int i = 0; i < num_lanes; ++i) {
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), rep[i], zero));
+ tmp_result = d.Phi(MachineRepresentation::kWord32, zero, tmp_result);
+ }
+ rep_node[0] = tmp_result;
+ for (int i = 1; i < num_lanes; ++i) {
+ rep_node[i] = nullptr;
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
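// Scalar reference for the lowering above (an illustrative sketch, not V8
// code): the single result lane is 1 iff every input lane is non-zero, which
// is what the chain of Word32Equal diamonds computes.
static int32_t AllTrueReference(const int32_t* lanes, int num_lanes) {
  int32_t result = 1;
  for (int i = 0; i < num_lanes; ++i) {
    if (lanes[i] == 0) result = 0;  // any zero lane forces the result to 0
  }
  return result;
}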
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
int num_lanes = NumLanes(rep_type);
switch (node->opcode()) {
case IrOpcode::kS128Const: {
- // Lower 128.const to 4 Int32Constant.
+ // We could use GetReplacementsWithType for all this, but it adds a lot of
+ // nodes, so sign extend the immediates ourselves here.
DCHECK_EQ(0, node->InputCount());
- constexpr int kNumLanes = kSimd128Size / sizeof(uint32_t);
- uint32_t val[kNumLanes];
- memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
- Node** rep_node = zone()->NewArray<Node*>(kNumLanes);
- for (int i = 0; i < kNumLanes; ++i) {
- rep_node[i] = mcgraph_->Int32Constant(val[i]);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ S128ImmediateParameter params = S128ImmediateParameterOf(node->op());
+
+ // For all the small ints below, we have a choice of static_cast or bit
+ // twiddling; clang seems to be able to optimize either
+ // (https://godbolt.org/z/9c65o8), so use static_cast for clarity.
+ switch (rep_type) {
+ case SimdType::kInt8x16: {
+ for (int i = 0; i < num_lanes; ++i) {
+ Address data_address = reinterpret_cast<Address>(params.data() + i);
+ rep_node[i] = mcgraph_->Int32Constant(
+ static_cast<int32_t>(static_cast<int8_t>(
+ base::ReadLittleEndianValue<int8_t>(data_address))));
+ }
+ break;
+ }
+ case SimdType::kInt16x8: {
+ int16_t val[kNumLanes16];
+ memcpy(val, params.data(), kSimd128Size);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = mcgraph_->Int32Constant(static_cast<int32_t>(
+ base::ReadLittleEndianValue<int16_t>(&val[i])));
+ }
+ break;
+ }
+ case SimdType::kInt32x4: {
+ uint32_t val[kNumLanes32];
+ memcpy(val, params.data(), kSimd128Size);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = mcgraph_->Int32Constant(
+ base::ReadLittleEndianValue<uint32_t>(&val[i]));
+ }
+ break;
+ }
+ case SimdType::kInt64x2: {
+ uint64_t val[kNumLanes64];
+ memcpy(val, params.data(), kSimd128Size);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = mcgraph_->Int64Constant(
+ base::ReadLittleEndianValue<uint64_t>(&val[i]));
+ }
+ break;
+ }
+ case SimdType::kFloat32x4: {
+ float val[kNumLanes32];
+ memcpy(val, params.data(), kSimd128Size);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = mcgraph_->Float32Constant(
+ base::ReadLittleEndianValue<float>(&val[i]));
+ }
+ break;
+ }
+ case SimdType::kFloat64x2: {
+ double val[kNumLanes64];
+ memcpy(val, params.data(), kSimd128Size);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = mcgraph_->Float64Constant(
+ base::ReadLittleEndianValue<double>(&val[i]));
+ }
+ break;
+ }
}
- ReplaceNode(node, rep_node, kNumLanes);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
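// The two equivalent sign-extension spellings mentioned in the kInt8x16 case
// above, as standalone helpers (illustrative only; fixed-width types from
// <cstdint> assumed):
static int32_t SignExtendByteViaCast(uint8_t b) {
  return static_cast<int32_t>(static_cast<int8_t>(b));
}
static int32_t SignExtendByteViaBitTwiddling(uint8_t b) {
  return (static_cast<int32_t>(b) ^ 0x80) - 0x80;  // same value as the cast
}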
case IrOpcode::kStart: {
@@ -1152,7 +1321,9 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
case IrOpcode::kSimd128ReverseBytes: {
DCHECK_EQ(1, node->InputCount());
- bool is_float = ReplacementType(node->InputAt(0)) == SimdType::kFloat32x4;
+ SimdType input_type = ReplacementType(node->InputAt(0));
+ bool is_float = input_type == SimdType::kFloat32x4 ||
+ input_type == SimdType::kFloat64x2;
replacements_[node->id()].type =
is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4;
Node** rep = GetReplacementsWithType(
@@ -1189,7 +1360,30 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kReturn: {
+ int old_input_count = node->InputCount();
+ int return_arity = static_cast<int>(signature()->return_count());
+ for (int i = 0; i < return_arity; i++) {
+ if (signature()->GetReturn(i) != MachineRepresentation::kSimd128) {
+ continue;
+ }
+
+ // Return nodes have a hidden input at index 0, so value i is input i + 1.
+ Node* input = node->InputAt(i + 1);
+ if (!HasReplacement(0, input)) {
+ continue;
+ }
+
+ // V128 return types are lowered to i32x4.
+ Node** reps = GetReplacementsWithType(input, rep_type);
+ ReplaceNode(input, reps, NumLanes(rep_type));
+ }
+
DefaultLowering(node);
+ // Nothing needs to be done here since inputs did not change.
+ if (old_input_count == node->InputCount()) {
+ break;
+ }
+
int new_return_count = GetReturnCountAfterLoweringSimd128(signature());
if (static_cast<int>(signature()->return_count()) != new_return_count) {
NodeProperties::ChangeOp(node, common()->Return(new_return_count));
@@ -1204,6 +1398,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
GetReturnCountAfterLoweringSimd128(call_descriptor) !=
static_cast<int>(call_descriptor->ReturnCount());
+ // All call arguments are lowered to i32x4 in the call descriptor, so the
+ // arguments need to be converted to i32x4 as well.
+ for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+ Node* input = node->InputAt(i);
+ if (HasReplacement(0, input)) {
+ Node** reps = GetReplacementsWithType(input, SimdType::kInt32x4);
+ ReplaceNode(input, reps, NumLanes(SimdType::kInt32x4));
+ }
+ }
+
if (DefaultLowering(node) || returns_require_lowering) {
// We have to adjust the call descriptor.
const Operator* op = common()->Call(
@@ -1216,6 +1420,21 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
size_t return_arity = call_descriptor->ReturnCount();
+
+ if (return_arity == 1) {
+      // We access the additional return values through projections. This is
+      // the special case for return_arity == 1; with multi-returns, the
+      // projections for each return value have already been built and are
+      // handled by the code following this block.
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
+ rep_node[i] =
+ graph()->NewNode(common()->Projection(i), node, graph()->start());
+ }
+ ReplaceNode(node, rep_node, kNumLanes32);
+ break;
+ }
+
ZoneVector<Node*> projections(return_arity, zone());
NodeProperties::CollectValueProjections(node, projections.data(),
return_arity);
@@ -1261,6 +1480,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kI64x2Add: {
+ LowerBinaryOp(node, rep_type, machine()->Int64Add());
+ break;
+ }
+ case IrOpcode::kI64x2Sub: {
+ LowerBinaryOp(node, rep_type, machine()->Int64Sub());
+ break;
+ }
+ case IrOpcode::kI64x2Mul: {
+ LowerBinaryOp(node, rep_type, machine()->Int64Mul());
+ break;
+ }
#define I32X4_BINOP_CASE(opcode, instruction) \
case IrOpcode::opcode: { \
LowerBinaryOp(node, rep_type, machine()->instruction()); \
@@ -1340,6 +1571,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
break;
}
+ case IrOpcode::kI64x2Neg: {
+ DCHECK_EQ(1, node->InputCount());
+ Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node* zero = graph()->NewNode(common()->Int64Constant(0));
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = graph()->NewNode(machine()->Int64Sub(), zero, rep[i]);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+ break;
+ }
case IrOpcode::kI32x4Neg:
case IrOpcode::kI16x8Neg:
case IrOpcode::kI8x16Neg: {
@@ -1483,12 +1726,15 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerPack(node, SimdType::kInt16x8, SimdType::kInt8x16, false);
break;
}
+ case IrOpcode::kI64x2Shl:
case IrOpcode::kI32x4Shl:
case IrOpcode::kI16x8Shl:
case IrOpcode::kI8x16Shl:
+ case IrOpcode::kI64x2ShrS:
case IrOpcode::kI32x4ShrS:
case IrOpcode::kI16x8ShrS:
case IrOpcode::kI8x16ShrS:
+ case IrOpcode::kI64x2ShrU:
case IrOpcode::kI32x4ShrU:
case IrOpcode::kI16x8ShrU:
case IrOpcode::kI8x16ShrU: {
@@ -1544,6 +1790,42 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
+ case IrOpcode::kF64x2Abs: {
+ LowerUnaryOp(node, rep_type, machine()->Float64Abs());
+ break;
+ }
+ case IrOpcode::kF64x2Neg: {
+ LowerUnaryOp(node, rep_type, machine()->Float64Neg());
+ break;
+ }
+ case IrOpcode::kF64x2Sqrt: {
+ LowerUnaryOp(node, rep_type, machine()->Float64Sqrt());
+ break;
+ }
+ case IrOpcode::kF64x2Add: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Add());
+ break;
+ }
+ case IrOpcode::kF64x2Sub: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Sub());
+ break;
+ }
+ case IrOpcode::kF64x2Mul: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Mul());
+ break;
+ }
+ case IrOpcode::kF64x2Div: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Div());
+ break;
+ }
+ case IrOpcode::kF64x2Min: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Min());
+ break;
+ }
+ case IrOpcode::kF64x2Max: {
+ LowerBinaryOp(node, rep_type, machine()->Float64Max());
+ break;
+ }
case IrOpcode::kF64x2Splat:
case IrOpcode::kF32x4Splat:
case IrOpcode::kI64x2Splat:
@@ -1551,18 +1833,30 @@ void SimdScalarLowering::LowerNode(Node* node) {
case IrOpcode::kI16x8Splat:
case IrOpcode::kI8x16Splat: {
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ Node* val = (HasReplacement(0, node->InputAt(0)))
+ ? GetReplacements(node->InputAt(0))[0]
+ : node->InputAt(0);
+
+      // I16 and I8 are placed in Word32 nodes, so we need to mask them
+      // accordingly to account for overflows, and then sign extend them.
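+      // E.g. splatting the word32 value 0x12349ABC as i16x8 masks it to 0x9ABC
+      // and sign-extends that to 0xFFFF9ABC in every lane.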
+ if (node->opcode() == IrOpcode::kI16x8Splat) {
+ val = graph()->NewNode(machine()->SignExtendWord16ToInt32(),
+ Mask(val, kMask16));
+ } else if (node->opcode() == IrOpcode::kI8x16Splat) {
+ val = graph()->NewNode(machine()->SignExtendWord8ToInt32(),
+ Mask(val, kMask8));
+ }
+
for (int i = 0; i < num_lanes; ++i) {
- if (HasReplacement(0, node->InputAt(0))) {
- rep_node[i] = GetReplacements(node->InputAt(0))[0];
- } else {
- rep_node[i] = node->InputAt(0);
- }
+ rep_node[i] = val;
}
ReplaceNode(node, rep_node, num_lanes);
break;
}
- case IrOpcode::kI32x4ExtractLane:
+ case IrOpcode::kF64x2ExtractLane:
case IrOpcode::kF32x4ExtractLane:
+ case IrOpcode::kI64x2ExtractLane:
+ case IrOpcode::kI32x4ExtractLane:
case IrOpcode::kI16x8ExtractLaneU:
case IrOpcode::kI16x8ExtractLaneS:
case IrOpcode::kI8x16ExtractLaneU:
@@ -1573,11 +1867,14 @@ void SimdScalarLowering::LowerNode(Node* node) {
for (int i = 1; i < num_lanes; ++i) {
rep_node[i] = nullptr;
}
+
ReplaceNode(node, rep_node, num_lanes);
break;
}
- case IrOpcode::kI32x4ReplaceLane:
+ case IrOpcode::kF64x2ReplaceLane:
case IrOpcode::kF32x4ReplaceLane:
+ case IrOpcode::kI64x2ReplaceLane:
+ case IrOpcode::kI32x4ReplaceLane:
case IrOpcode::kI16x8ReplaceLane:
case IrOpcode::kI8x16ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
@@ -1601,6 +1898,9 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerCompareOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
break; \
}
+ COMPARISON_CASE(Float64x2, kF64x2Eq, Float64Equal, false)
+ COMPARISON_CASE(Float64x2, kF64x2Lt, Float64LessThan, false)
+ COMPARISON_CASE(Float64x2, kF64x2Le, Float64LessThanOrEqual, false)
COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
@@ -1634,6 +1934,10 @@ void SimdScalarLowering::LowerNode(Node* node) {
COMPARISON_CASE(Int8x16, kI8x16GtU, Uint32LessThan, true)
COMPARISON_CASE(Int8x16, kI8x16GeU, Uint32LessThanOrEqual, true)
#undef COMPARISON_CASE
+ case IrOpcode::kF64x2Ne: {
+ LowerNotEqual(node, SimdType::kFloat64x2, machine()->Float64Equal());
+ break;
+ }
case IrOpcode::kF32x4Ne: {
LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
@@ -1655,7 +1959,8 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kInt32x4 ||
ReplacementType(node->InputAt(0)) == SimdType::kInt16x8 ||
ReplacementType(node->InputAt(0)) == SimdType::kInt8x16);
- Node** boolean_input = GetReplacements(node->InputAt(0));
+ Node** boolean_input =
+ GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
@@ -1670,7 +1975,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
- case IrOpcode::kS8x16Swizzle: {
+ case IrOpcode::kI8x16Swizzle: {
DCHECK_EQ(2, node->InputCount());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** indices = GetReplacementsWithType(node->InputAt(1), rep_type);
@@ -1709,7 +2014,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_nodes, num_lanes);
break;
}
- case IrOpcode::kS8x16Shuffle: {
+ case IrOpcode::kI8x16Shuffle: {
DCHECK_EQ(2, node->InputCount());
S128ImmediateParameter shuffle = S128ImmediateParameterOf(node->op());
Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
@@ -1723,45 +2028,22 @@ void SimdScalarLowering::LowerNode(Node* node) {
break;
}
case IrOpcode::kV32x4AnyTrue:
- case IrOpcode::kV32x4AllTrue:
case IrOpcode::kV16x8AnyTrue:
- case IrOpcode::kV16x8AllTrue:
- case IrOpcode::kV8x16AnyTrue:
- case IrOpcode::kV8x16AllTrue: {
+ case IrOpcode::kV8x16AnyTrue: {
DCHECK_EQ(1, node->InputCount());
- SimdType input_rep_type = ReplacementType(node->InputAt(0));
- Node** rep;
- // If the input is a SIMD float, bitcast it to a SIMD int of the same
- // shape, because the comparisons below use Word32.
- if (input_rep_type == SimdType::kFloat32x4) {
- // TODO(v8:9418): f64x2 lowering is not implemented yet.
- rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32x4);
- } else {
- rep = GetReplacements(node->InputAt(0));
- }
- int input_num_lanes = NumLanes(input_rep_type);
+      // AnyTrue always returns an I32x4 and works with inputs of any shape,
+      // but we still need GetReplacementsWithType if the input is a float.
+ DCHECK_EQ(ReplacementType(node), SimdType::kInt32x4);
+ Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
Node* true_node = mcgraph_->Int32Constant(1);
- Node* false_node = mcgraph_->Int32Constant(0);
- Node* tmp_result = false_node;
- if (node->opcode() == IrOpcode::kV32x4AllTrue ||
- node->opcode() == IrOpcode::kV16x8AllTrue ||
- node->opcode() == IrOpcode::kV8x16AllTrue) {
- tmp_result = true_node;
- }
- for (int i = 0; i < input_num_lanes; ++i) {
- Diamond is_false(
- graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rep[i], false_node));
- if (node->opcode() == IrOpcode::kV32x4AllTrue ||
- node->opcode() == IrOpcode::kV16x8AllTrue ||
- node->opcode() == IrOpcode::kV8x16AllTrue) {
- tmp_result = is_false.Phi(MachineRepresentation::kWord32, false_node,
- tmp_result);
- } else {
- tmp_result = is_false.Phi(MachineRepresentation::kWord32, tmp_result,
- true_node);
- }
+ Node* zero = mcgraph_->Int32Constant(0);
+ Node* tmp_result = zero;
+ for (int i = 0; i < num_lanes; ++i) {
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Word32Equal(), reps[i], zero));
+ tmp_result =
+ d.Phi(MachineRepresentation::kWord32, tmp_result, true_node);
}
rep_node[0] = tmp_result;
for (int i = 1; i < num_lanes; ++i) {
@@ -1770,6 +2052,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
ReplaceNode(node, rep_node, num_lanes);
break;
}
+ case IrOpcode::kV32x4AllTrue: {
+ LowerAllTrueOp(node, SimdType::kInt32x4);
+ break;
+ }
+ case IrOpcode::kV16x8AllTrue: {
+ LowerAllTrueOp(node, SimdType::kInt16x8);
+ break;
+ }
+ case IrOpcode::kV8x16AllTrue: {
+ LowerAllTrueOp(node, SimdType::kInt8x16);
+ break;
+ }
case IrOpcode::kI8x16BitMask: {
LowerBitMaskOp(node, rep_type, 7);
break;
@@ -1789,11 +2083,18 @@ void SimdScalarLowering::LowerNode(Node* node) {
Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
int num_lanes = NumLanes(rep_type);
Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ // Nodes are stored signed, so mask away the top bits.
// rounding_average(left, right) = (left + right + 1) >> 1
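+      // E.g. for u8 lanes:
+      //   rounding_average(250, 253) = (250 + 253 + 1) >> 1 = 252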
+ const int bit_mask = num_lanes == 16 ? kMask8 : kMask16;
for (int i = 0; i < num_lanes; ++i) {
+ Node* mask_left = graph()->NewNode(machine()->Word32And(), rep_left[i],
+ mcgraph_->Int32Constant(bit_mask));
+ Node* mask_right =
+ graph()->NewNode(machine()->Word32And(), rep_right[i],
+ mcgraph_->Int32Constant(bit_mask));
Node* left_plus_right_plus_one = graph()->NewNode(
machine()->Int32Add(),
- graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]),
+ graph()->NewNode(machine()->Int32Add(), mask_left, mask_right),
mcgraph_->Int32Constant(1));
rep_node[i] =
graph()->NewNode(machine()->Word32Shr(), left_plus_right_plus_one,
@@ -1862,6 +2163,28 @@ void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
}
}
+void SimdScalarLowering::Int64ToFloat64(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes64; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastInt64ToFloat64(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
+void SimdScalarLowering::Float64ToInt64(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes64; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastFloat64ToInt64(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
for (int i = 0; i < kNumLanes32; ++i) {
if (replacements[i] != nullptr) {
@@ -1873,6 +2196,26 @@ void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
}
}
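+// Converts each int64 lane into two int32 nodes: the low word first, then the
+// high word (little-endian lane order).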
+void SimdScalarLowering::Int64ToInt32(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int64_t) / sizeof(int32_t);
+ const int bit_size = sizeof(int32_t) * 8;
+ const Operator* truncate = machine()->TruncateInt64ToInt32();
+
+ for (int i = 0; i < kNumLanes64; i++) {
+ if (replacements[i] != nullptr) {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = graph()->NewNode(
+ truncate, graph()->NewNode(machine()->Word64Sar(), replacements[i],
+ mcgraph_->Int32Constant(j * bit_size)));
+ }
+ } else {
+ for (int j = 0; j < num_ints; j++) {
+ result[num_ints * i + j] = nullptr;
+ }
+ }
+ }
+}
+
template <typename T>
void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
const int num_ints = sizeof(int32_t) / sizeof(T);
@@ -1926,6 +2269,20 @@ void SimdScalarLowering::SmallerIntToInt32(Node** replacements, Node** result) {
}
}
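+// Recombines pairs of int32 nodes into one int64 lane:
+// result[i] = (uint64_t)rep[2 * i + 1] << 32 | (uint32_t)rep[2 * i].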
+void SimdScalarLowering::Int32ToInt64(Node** replacements, Node** result) {
+ const int num_ints = sizeof(int64_t) / sizeof(int32_t);
+
+ for (int i = 0; i < kNumLanes64; i++) {
+ Node* i64 = graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ replacements[num_ints * i + 1]);
+ Node* high = graph()->NewNode(machine()->Word64Shl(), i64,
+ mcgraph_->Int32Constant(32));
+ Node* i64_low = graph()->NewNode(machine()->ChangeUint32ToUint64(),
+ replacements[num_ints * i]);
+ result[i] = graph()->NewNode(machine()->Word64Or(), high, i64_low);
+ }
+}
+
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
@@ -1933,8 +2290,22 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
}
int num_lanes = NumLanes(type);
Node** result = zone()->NewArray<Node*>(num_lanes);
- if (type == SimdType::kInt32x4) {
- if (ReplacementType(node) == SimdType::kFloat32x4) {
+ if (type == SimdType::kInt64x2) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToInt64(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kFloat64x2) {
+ Float64ToInt64(replacements, result);
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (type == SimdType::kInt32x4) {
+ if (ReplacementType(node) == SimdType::kInt64x2) {
+ Int64ToInt32(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kFloat64x2) {
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, result);
+ } else if (ReplacementType(node) == SimdType::kFloat32x4) {
Float32ToInt32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
SmallerIntToInt32<int16_t>(replacements, result);
@@ -1943,8 +2314,24 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
} else {
UNREACHABLE();
}
+ } else if (type == SimdType::kFloat64x2) {
+ if (ReplacementType(node) == SimdType::kInt64x2) {
+ Int64ToFloat64(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt32x4) {
+ Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Int32ToInt64(replacements, int32_to_int64);
+ Int64ToFloat64(int32_to_int64, result);
+ } else {
+ UNIMPLEMENTED();
+ }
} else if (type == SimdType::kFloat32x4) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
+ if (ReplacementType(node) == SimdType::kFloat64x2) {
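+      // This is a bit reinterpretation of the 128-bit value, not a numeric
+      // conversion: bitcast f64 to i64, split each i64 into two i32, then
+      // bitcast each i32 to f32.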
+ Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float64ToInt64(replacements, float64_to_int64);
+ Int64ToInt32(float64_to_int64, int64_to_int32);
+ Int32ToFloat32(int64_to_int32, result);
+ } else if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToFloat32(replacements, result);
} else if (ReplacementType(node) == SimdType::kInt16x8) {
UNIMPLEMENTED();
@@ -1958,13 +2345,23 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToSmallerInt<int16_t>(replacements, result);
} else if (ReplacementType(node) == SimdType::kFloat32x4) {
- UNIMPLEMENTED();
+ Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Float32ToInt32(replacements, float32_to_int32);
+ Int32ToSmallerInt<int16_t>(float32_to_int32, result);
} else {
UNREACHABLE();
}
} else if (type == SimdType::kInt8x16) {
- if (ReplacementType(node) == SimdType::kInt32x4) {
+ if (ReplacementType(node) == SimdType::kInt64x2) {
+ Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ Int64ToInt32(replacements, int64_to_int32);
+ Int32ToSmallerInt<int8_t>(int64_to_int32, result);
+ } else if (ReplacementType(node) == SimdType::kInt32x4) {
Int32ToSmallerInt<int8_t>(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt16x8) {
+ Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
+ SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
+ Int32ToSmallerInt<int8_t>(int16_to_int32, result);
} else {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index a852f94c7c..b86071f0ae 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_SIMD_SCALAR_LOWERING_H_
#include "src/compiler/common-operator.h"
+#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/machine-graph.h"
#include "src/compiler/machine-operator.h"
@@ -73,12 +74,16 @@ class SimdScalarLowering {
bool HasReplacement(size_t index, Node* node);
Node** GetReplacements(Node* node);
int ReplacementCount(Node* node);
+ void Float64ToInt64(Node** replacements, Node** result);
void Float32ToInt32(Node** replacements, Node** result);
void Int32ToFloat32(Node** replacements, Node** result);
+ void Int64ToFloat64(Node** replacements, Node** result);
+ void Int64ToInt32(Node** replacements, Node** result);
template <typename T>
void Int32ToSmallerInt(Node** replacements, Node** result);
template <typename T>
void SmallerIntToInt32(Node** replacements, Node** result);
+ void Int32ToInt64(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
@@ -89,6 +94,8 @@ class SimdScalarLowering {
void LowerStoreOp(Node* node);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool not_horizontal = true);
+ Node* ConstructPhiForComparison(Diamond d, SimdType rep_type, int true_value,
+ int false_value);
void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
Node* FixUpperBits(Node* input, int32_t shift);
@@ -111,6 +118,7 @@ class SimdScalarLowering {
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
MachineType MachineTypeFrom(SimdType simdType);
void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
+ void LowerAllTrueOp(Node* node, SimdType rep_type);
MachineGraph* const mcgraph_;
NodeMarker<State> state_;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 491fe74e39..2842259a2e 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -2828,6 +2828,7 @@ class RepresentationSelector {
return VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kTierUpCheck:
case IrOpcode::kUpdateInterruptBudget: {
ProcessInput<T>(node, 0, UseInfo::AnyTagged());
ProcessRemainingInputs<T>(node, 1);
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index e2f87b674e..3a5b3c6ec6 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -220,6 +220,41 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
if (m.left().node() == m.right().node()) return ReplaceBoolean(true);
break;
}
+ case IrOpcode::kCheckedInt32Add: {
+ // (x + a) + b => x + (a + b) where a and b are constants and have the
+ // same sign.
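+    // E.g. CheckedInt32Add(CheckedInt32Add(x, 10), 5) folds to
+    // CheckedInt32Add(x, 15), provided the folded constant does not overflow
+    // and the inner add has no other value uses.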
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ Node* checked_int32_add = m.left().node();
+ if (checked_int32_add->opcode() == IrOpcode::kCheckedInt32Add) {
+ Int32BinopMatcher n(checked_int32_add);
+ if (n.right().HasValue() &&
+ (n.right().Value() >= 0) == (m.right().Value() >= 0)) {
+ int32_t val;
+ bool overflow = base::bits::SignedAddOverflow32(
+ n.right().Value(), m.right().Value(), &val);
+ if (!overflow) {
+ bool has_no_other_value_uses = true;
+ for (Edge edge : checked_int32_add->use_edges()) {
+ if (!edge.from()->IsDead() &&
+ !NodeProperties::IsEffectEdge(edge) &&
+ edge.from() != node) {
+ has_no_other_value_uses = false;
+ break;
+ }
+ }
+ if (has_no_other_value_uses) {
+ node->ReplaceInput(0, n.left().node());
+ node->ReplaceInput(1, jsgraph()->Int32Constant(val));
+ RelaxEffectsAndControls(checked_int32_add);
+ return Changed(node);
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
default:
break;
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index bedfb6acaa..33bd71d221 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -289,19 +289,26 @@ CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
bool operator==(DynamicCheckMapsParameters const& lhs,
DynamicCheckMapsParameters const& rhs) {
- return lhs.handler().address() == rhs.handler().address() &&
- lhs.feedback() == rhs.feedback() && lhs.state() == rhs.state();
+ // FeedbackSource is sufficient as an equality check. FeedbackSource uniquely
+  // determines all other properties (handler, flags and the monomorphic map).
+ DCHECK_IMPLIES(lhs.feedback() == rhs.feedback(),
+ lhs.flags() == rhs.flags() && lhs.state() == rhs.state() &&
+ lhs.handler().address() == rhs.handler().address() &&
+ lhs.map().address() == rhs.map().address());
+ return lhs.feedback() == rhs.feedback();
}
size_t hash_value(DynamicCheckMapsParameters const& p) {
FeedbackSource::Hash feedback_hash;
- return base::hash_combine(p.handler().address(), feedback_hash(p.feedback()),
- p.state());
+ // FeedbackSource is sufficient for hashing. FeedbackSource uniquely
+  // determines all other properties (handler, flags and the monomorphic map).
+ return base::hash_combine(feedback_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os,
DynamicCheckMapsParameters const& p) {
- return os << p.handler() << ", " << p.feedback() << "," << p.state();
+ return os << p.handler() << ", " << p.feedback() << "," << p.state() << ","
+ << p.flags() << "," << p.map().address();
}
DynamicCheckMapsParameters const& DynamicCheckMapsParametersOf(
@@ -1315,6 +1322,12 @@ const Operator* SimplifiedOperatorBuilder::UpdateInterruptBudget(int delta) {
"UpdateInterruptBudget", 1, 1, 1, 0, 1, 0, delta);
}
+const Operator* SimplifiedOperatorBuilder::TierUpCheck() {
+ return zone()->New<Operator>(IrOpcode::kTierUpCheck,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "TierUpCheck", 1, 1, 1, 0, 1, 0);
+}
+
const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
DCHECK(type.IsRange());
return zone()->New<Operator1<Type>>(IrOpcode::kAssertType,
@@ -1474,11 +1487,10 @@ const Operator* SimplifiedOperatorBuilder::CheckMaps(
}
const Operator* SimplifiedOperatorBuilder::DynamicCheckMaps(
- CheckMapsFlags flags, Handle<Object> handler,
- const FeedbackSource& feedback,
- DynamicCheckMapsParameters::ICState ic_state) {
- DynamicCheckMapsParameters const parameters(flags, handler, feedback,
- ic_state);
+ CheckMapsFlags flags, Handle<Object> handler, MaybeHandle<Map> maybe_map,
+ const FeedbackSource& feedback) {
+ DynamicCheckMapsParameters const parameters(flags, handler, maybe_map,
+ feedback);
return zone()->New<Operator1<DynamicCheckMapsParameters>>( // --
IrOpcode::kDynamicCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index c4b5740c10..eab865fd59 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -432,19 +432,26 @@ class DynamicCheckMapsParameters final {
enum ICState { kMonomorphic, kPolymorphic };
DynamicCheckMapsParameters(CheckMapsFlags flags, Handle<Object> handler,
- const FeedbackSource& feedback, ICState state)
- : flags_(flags), handler_(handler), feedback_(feedback), state_(state) {}
+ MaybeHandle<Map> maybe_map,
+ const FeedbackSource& feedback)
+ : flags_(flags),
+ handler_(handler),
+ maybe_map_(maybe_map),
+ feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
Handle<Object> handler() const { return handler_; }
+ MaybeHandle<Map> map() const { return maybe_map_; }
FeedbackSource const& feedback() const { return feedback_; }
- ICState const& state() const { return state_; }
+ ICState state() const {
+ return maybe_map_.is_null() ? ICState::kPolymorphic : ICState::kMonomorphic;
+ }
private:
CheckMapsFlags const flags_;
Handle<Object> const handler_;
+ MaybeHandle<Map> const maybe_map_;
FeedbackSource const feedback_;
- ICState const state_;
};
bool operator==(DynamicCheckMapsParameters const&,
@@ -803,6 +810,11 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// delta parameter represents the executed bytecodes since the last update.
const Operator* UpdateInterruptBudget(int delta);
+ // Takes the current feedback vector as input 0, and generates a check of the
+ // vector's marker. Depending on the marker's value, we either do nothing,
+ // trigger optimized compilation, or install a finished code object.
+ const Operator* TierUpCheck();
+
const Operator* ToBoolean();
const Operator* StringConcat();
@@ -875,10 +887,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckInternalizedString();
const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
const FeedbackSource& = FeedbackSource());
- const Operator* DynamicCheckMaps(
- CheckMapsFlags flags, Handle<Object> handler,
- const FeedbackSource& feedback,
- DynamicCheckMapsParameters::ICState ic_state);
+ const Operator* DynamicCheckMaps(CheckMapsFlags flags, Handle<Object> handler,
+ MaybeHandle<Map> map,
+ const FeedbackSource& feedback);
const Operator* CheckNotTaggedHole();
const Operator* CheckNumber(const FeedbackSource& feedback);
const Operator* CheckReceiver();
@@ -1159,6 +1170,18 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
}
};
+class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
+ public:
+ explicit constexpr TierUpCheckNode(Node* node)
+ : SimplifiedNodeWrapperBase(node) {
+ CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kTierUpCheck);
+ }
+
+#define INPUTS(V) V(FeedbackVector, feedback_vector, 0, FeedbackVector)
+ INPUTS(DEFINE_INPUT_ACCESSORS)
+#undef INPUTS
+};
+
class UpdateInterruptBudgetNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr UpdateInterruptBudgetNode(Node* node)
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index c89d5d712a..70dadd9441 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -396,7 +396,8 @@ UnobservablesSet RedundantStoreFinder::RecomputeUseIntersection(Node* node) {
// Everything is observable after these opcodes; return the empty set.
DCHECK_EXTRA(
opcode == IrOpcode::kReturn || opcode == IrOpcode::kTerminate ||
- opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow,
+ opcode == IrOpcode::kDeoptimize || opcode == IrOpcode::kThrow ||
+ opcode == IrOpcode::kTailCall,
"for #%d:%s", node->id(), node->op()->mnemonic());
USE(opcode);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 9ec3eac5e5..a4996f3cc2 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -325,7 +325,7 @@ void Typer::Run(const NodeVector& roots,
induction_vars->ChangeToInductionVariablePhis();
}
Visitor visitor(this, induction_vars);
- GraphReducer graph_reducer(zone(), graph(), tick_counter_);
+ GraphReducer graph_reducer(zone(), graph(), tick_counter_, broker());
graph_reducer.AddReducer(&visitor);
for (Node* const root : roots) graph_reducer.ReduceNode(root);
graph_reducer.ReduceGraph();
@@ -1196,6 +1196,7 @@ Type Typer::Visitor::TypeTypeOf(Node* node) {
return Type::InternalizedString();
}
+Type Typer::Visitor::TypeTierUpCheck(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeUpdateInterruptBudget(Node* node) { UNREACHABLE(); }
// JS conversion operators.
@@ -1307,6 +1308,10 @@ Type Typer::Visitor::TypeJSLoadProperty(Node* node) {
Type Typer::Visitor::TypeJSLoadNamed(Node* node) { return Type::NonInternal(); }
+Type Typer::Visitor::TypeJSLoadNamedFromSuper(Node* node) {
+ return Type::NonInternal();
+}
+
Type Typer::Visitor::TypeJSLoadGlobal(Node* node) {
return Type::NonInternal();
}
@@ -1821,7 +1826,7 @@ Type Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
return TypeUnaryOp(node, ToObject);
- case Runtime::kInlineToStringRT:
+ case Runtime::kInlineToString:
return TypeUnaryOp(node, ToString);
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index edf085485c..302e1212ee 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -722,6 +722,9 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kJSLoadNamed:
CheckTypeIs(node, Type::Any());
break;
+ case IrOpcode::kJSLoadNamedFromSuper:
+ CheckTypeIs(node, Type::Any());
+ break;
case IrOpcode::kJSLoadGlobal:
CheckTypeIs(node, Type::Any());
CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
@@ -760,6 +763,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTypeOf:
CheckTypeIs(node, Type::InternalizedString());
break;
+ case IrOpcode::kTierUpCheck:
case IrOpcode::kUpdateInterruptBudget:
CheckValueInputIs(node, 0, Type::Any());
CheckNotTyped(node);
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f8f333da2b..91dde088f6 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -134,15 +134,18 @@ MachineType assert_size(int expected_size, MachineType type) {
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(index), value, \
MachineRepresentation::kTagged, kFullWriteBarrier)
-void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
+void EnsureEnd(MachineGraph* mcgraph) {
Graph* g = mcgraph->graph();
- if (g->end()) {
- NodeProperties::MergeControlToEnd(g, mcgraph->common(), node);
- } else {
- g->SetEnd(g->NewNode(mcgraph->common()->End(1), node));
+ if (g->end() == nullptr) {
+ g->SetEnd(g->NewNode(mcgraph->common()->End(0)));
}
}
+void MergeControlToEnd(MachineGraph* mcgraph, Node* node) {
+ EnsureEnd(mcgraph);
+ NodeProperties::MergeControlToEnd(mcgraph->graph(), mcgraph->common(), node);
+}
+
bool ContainsSimd(const wasm::FunctionSig* sig) {
for (auto type : sig->all()) {
if (type == wasm::kWasmS128) return true;
@@ -3051,15 +3054,6 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
{
// Function imported to module.
- // TODO(9495): Make sure it works with functions imported from other
- // modules. Currently, this will never happen: Since functions have to be
- // tunneled through JS, and we currently do not have a JS API to pass
- // specific function types, we habe to export/import function references
- // as funcref. Then, we cannot cast down to the type of the function,
- // because we do not have access to the defining module's types. This
- // could be fixed either by building a richer JS API, or by implementing
- // the type import proposal. That said, this code should work for those
- // cases too.
gasm_->Bind(&imported_label);
Node* imported_instance = gasm_->Load(
@@ -3087,8 +3081,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
// Call to a WasmJSFunction.
// The call target is the wasm-to-js wrapper code.
gasm_->Bind(&js_label);
- // TODO(7748): Implement.
- TrapIfTrue(wasm::kTrapUnreachable, gasm_->Int32Constant(1), position);
+ // TODO(9495): Implement when the interaction with the type reflection
+ // proposal is clear.
+ TrapIfTrue(wasm::kTrapWasmJSFunction, gasm_->Int32Constant(1), position);
gasm_->Goto(&end_label, args[0], RefNull() /* Dummy value */);
}
@@ -3675,7 +3670,7 @@ Node* WasmGraphBuilder::CheckBoundsAndAlignment(
// bounds-checked index, which is guaranteed to have (the equivalent of)
// {uintptr_t} representation.
Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
- uint32_t offset,
+ uint64_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
DCHECK_LE(1, access_size);
@@ -3686,13 +3681,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
return index;
}
- if (!base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size)) {
+ // If the offset does not fit in a uintptr_t, this can never succeed on this
+ // machine.
+ if (offset > std::numeric_limits<uintptr_t>::max() ||
+ !base::IsInBounds<uintptr_t>(offset, access_size,
+ env_->max_memory_size)) {
// The access will be out of bounds, even for the largest memory.
TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
- return mcgraph()->IntPtrConstant(0);
+ return mcgraph()->UintPtrConstant(0);
}
- uint64_t end_offset = uint64_t{offset} + access_size - 1u;
- Node* end_offset_node = IntPtrConstant(end_offset);
+ uintptr_t end_offset = offset + access_size - 1u;
+ Node* end_offset_node = mcgraph_->UintPtrConstant(end_offset);
// The accessed memory is [index + offset, index + end_offset].
// Check that the last read byte (at {index + end_offset}) is in bounds.
@@ -3996,9 +3995,7 @@ Node* WasmGraphBuilder::LoadTransform(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
- if (memtype.representation() == MachineRepresentation::kSimd128) {
- has_simd_ = true;
- }
+ has_simd_ = true;
Node* load;
@@ -4366,15 +4363,17 @@ CallDescriptor* WasmGraphBuilder::GetI64AtomicWaitCallDescriptor() {
return i64_atomic_wait_descriptor_;
}
-void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
+void WasmGraphBuilder::LowerInt64(Signature<MachineRepresentation>* sig) {
if (mcgraph()->machine()->Is64()) return;
Int64Lowering r(mcgraph()->graph(), mcgraph()->machine(), mcgraph()->common(),
- mcgraph()->zone(),
- CreateMachineSignature(mcgraph()->zone(), sig_, origin),
- std::move(lowering_special_case_));
+ mcgraph()->zone(), sig, std::move(lowering_special_case_));
r.LowerGraph();
}
+void WasmGraphBuilder::LowerInt64(CallOrigin origin) {
+ LowerInt64(CreateMachineSignature(mcgraph()->zone(), sig_, origin));
+}
+
void WasmGraphBuilder::SimdScalarLoweringForTesting() {
SimdScalarLowering(mcgraph(), CreateMachineSignature(mcgraph()->zone(), sig_,
kCalledFromWasm))
@@ -4952,8 +4951,8 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode, Node* const* inputs) {
return graph()->NewNode(mcgraph()->machine()->V8x16AnyTrue(), inputs[0]);
case wasm::kExprV8x16AllTrue:
return graph()->NewNode(mcgraph()->machine()->V8x16AllTrue(), inputs[0]);
- case wasm::kExprS8x16Swizzle:
- return graph()->NewNode(mcgraph()->machine()->S8x16Swizzle(), inputs[0],
+ case wasm::kExprI8x16Swizzle:
+ return graph()->NewNode(mcgraph()->machine()->I8x16Swizzle(), inputs[0],
inputs[1]);
default:
FATAL_UNSUPPORTED_OPCODE(opcode);
@@ -5014,7 +5013,7 @@ Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
Node* WasmGraphBuilder::Simd8x16ShuffleOp(const uint8_t shuffle[16],
Node* const* inputs) {
has_simd_ = true;
- return graph()->NewNode(mcgraph()->machine()->S8x16Shuffle(shuffle),
+ return graph()->NewNode(mcgraph()->machine()->I8x16Shuffle(shuffle),
inputs[0], inputs[1]);
}
@@ -5568,13 +5567,13 @@ Node* IsI31(GraphAssembler* gasm, Node* object) {
}
}
-void AssertFalse(GraphAssembler* gasm, Node* condition) {
+void AssertFalse(MachineGraph* mcgraph, GraphAssembler* gasm, Node* condition) {
#if DEBUG
if (FLAG_debug_code) {
auto ok = gasm->MakeLabel();
gasm->GotoIfNot(condition, &ok);
+ EnsureEnd(mcgraph);
gasm->Unreachable();
- gasm->Goto(&ok);
gasm->Bind(&ok);
}
#endif
@@ -5592,7 +5591,7 @@ Node* WasmGraphBuilder::RefTest(Node* object, Node* rtt,
gasm_->GotoIf(IsI31(gasm_.get(), object), &done, gasm_->Int32Constant(0));
need_done_label = true;
} else {
- AssertFalse(gasm_.get(), IsI31(gasm_.get(), object));
+ AssertFalse(mcgraph(), gasm_.get(), IsI31(gasm_.get(), object));
}
if (null_check == kWithNullCheck) {
gasm_->GotoIf(gasm_->WordEqual(object, RefNull()), &done,
@@ -5627,7 +5626,7 @@ Node* WasmGraphBuilder::RefCast(Node* object, Node* rtt,
TrapIfTrue(wasm::kTrapIllegalCast, IsI31(gasm_.get(), object), position);
}
} else {
- AssertFalse(gasm_.get(), IsI31(gasm_.get(), object));
+ AssertFalse(mcgraph(), gasm_.get(), IsI31(gasm_.get(), object));
}
if (null_check == kWithNullCheck) {
TrapIfTrue(wasm::kTrapIllegalCast, gasm_->WordEqual(object, RefNull()),
@@ -5667,7 +5666,7 @@ Node* WasmGraphBuilder::BrOnCast(Node* object, Node* rtt,
merge_effects.emplace_back(effect());
}
} else {
- AssertFalse(gasm_.get(), is_i31);
+ AssertFalse(mcgraph(), gasm_.get(), is_i31);
}
if (null_check == kWithNullCheck) {
@@ -5855,9 +5854,11 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
public:
WasmWrapperGraphBuilder(Zone* zone, MachineGraph* mcgraph,
const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module,
compiler::SourcePositionTable* spt,
StubCallMode stub_mode, wasm::WasmFeatures features)
: WasmGraphBuilder(nullptr, zone, mcgraph, sig, spt),
+ module_(module),
stub_mode_(stub_mode),
enabled_features_(features) {}
@@ -6063,6 +6064,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
if (representation == wasm::HeapType::kEq) {
return BuildAllocateObjectWrapper(node);
}
+ if (type.has_index() && module_->has_signature(type.ref_index())) {
+      // Typed function references are passed through unchanged.
+ return node;
+ }
// TODO(7748): Figure out a JS interop story for arrays and structs.
// If this is reached, then IsJSCompatibleSignature() is too permissive.
UNREACHABLE();
@@ -6148,6 +6153,29 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
graph()->NewNode(call, target, input, context, effect(), control()));
}
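+  // Calls the runtime to check that {input} is a valid value for the given
+  // wasm reference type, and throws a TypeError if it is not.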
+ void BuildCheckValidRefValue(Node* input, Node* js_context,
+ wasm::ValueType type) {
+ // Make sure ValueType fits in a Smi.
+ STATIC_ASSERT(wasm::ValueType::kLastUsedBit + 1 <= kSmiValueSize);
+ Node* inputs[] = {
+ instance_node_.get(), input,
+ IntPtrConstant(IntToSmi(static_cast<int>(type.raw_bit_field())))};
+
+ Node* check = BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
+ Runtime::kWasmIsValidRefValue, js_context, inputs, 3)));
+
+ Diamond type_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
+ type_check.Chain(control());
+ SetControl(type_check.if_false);
+
+ Node* old_effect = effect();
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ nullptr, 0);
+
+ SetEffectControl(type_check.EffectPhi(old_effect, effect()),
+ type_check.merge);
+ }
+
Node* FromJS(Node* input, Node* js_context, wasm::ValueType type) {
switch (type.kind()) {
case wasm::ValueType::kRef:
@@ -6156,28 +6184,21 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::HeapType::kExtern:
case wasm::HeapType::kExn:
return input;
- case wasm::HeapType::kFunc: {
- Node* check =
- BuildChangeSmiToInt32(SetEffect(BuildCallToRuntimeWithContext(
- Runtime::kWasmIsValidFuncRefValue, js_context, &input, 1)));
-
- Diamond type_check(graph(), mcgraph()->common(), check,
- BranchHint::kTrue);
- type_check.Chain(control());
- SetControl(type_check.if_false);
-
- Node* old_effect = effect();
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
- js_context, nullptr, 0);
-
- SetEffectControl(type_check.EffectPhi(old_effect, effect()),
- type_check.merge);
-
+ case wasm::HeapType::kFunc:
+ BuildCheckValidRefValue(input, js_context, type);
return input;
- }
case wasm::HeapType::kEq:
+ BuildCheckValidRefValue(input, js_context, type);
return BuildUnpackObjectWrapper(input);
+ case wasm::HeapType::kI31:
+ // If this is reached, then IsJSCompatibleSignature() is too
+ // permissive.
+ UNREACHABLE();
default:
+ if (module_->has_signature(type.ref_index())) {
+ BuildCheckValidRefValue(input, js_context, type);
+ return input;
+ }
// If this is reached, then IsJSCompatibleSignature() is too
// permissive.
UNREACHABLE();
@@ -6210,6 +6231,38 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
}
+ Node* SmiToFloat32(Node* input) {
+ return graph()->NewNode(mcgraph()->machine()->RoundInt32ToFloat32(),
+ BuildChangeSmiToInt32(input));
+ }
+
+ Node* SmiToFloat64(Node* input) {
+ return graph()->NewNode(mcgraph()->machine()->ChangeInt32ToFloat64(),
+ BuildChangeSmiToInt32(input));
+ }
+
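+  // Fast-path conversion of a JS parameter that is known to be a Smi; see
+  // CanTransformFast below.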
+ Node* FromJSFast(Node* input, wasm::ValueType type) {
+ switch (type.kind()) {
+ case wasm::ValueType::kI32:
+ return BuildChangeSmiToInt32(input);
+ case wasm::ValueType::kF32:
+ return SmiToFloat32(input);
+ case wasm::ValueType::kF64:
+ return SmiToFloat64(input);
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kI64:
+ case wasm::ValueType::kRtt:
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
+ UNREACHABLE();
+ break;
+ }
+ }
+
void BuildModifyThreadInWasmFlag(bool new_value) {
if (!trap_handler::IsTrapHandlerEnabled()) return;
Node* isolate_root = BuildLoadIsolateRoot();
@@ -6280,55 +6333,15 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
return SetControl(CALL_BUILTIN(WasmAllocateJSArray, array_length, context));
}
- void BuildJSToWasmWrapper(bool is_import) {
- const int wasm_count = static_cast<int>(sig_->parameter_count());
- const int rets_count = static_cast<int>(sig_->return_count());
-
- // Build the start and the JS parameter nodes.
- SetEffectControl(Start(wasm_count + 5));
-
- // Create the js_closure and js_context parameters.
- Node* js_closure =
- graph()->NewNode(mcgraph()->common()->Parameter(
- Linkage::kJSCallClosureParamIndex, "%closure"),
- graph()->start());
- Node* js_context = graph()->NewNode(
- mcgraph()->common()->Parameter(
- Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
- graph()->start());
-
- // Create the instance_node node to pass as parameter. It is loaded from
- // an actual reference to an instance or a placeholder reference,
- // called {WasmExportedFunction} via the {WasmExportedFunctionData}
- // structure.
- Node* function_data = BuildLoadFunctionDataFromExportedFunction(js_closure);
- instance_node_.set(
- BuildLoadInstanceFromExportedFunctionData(function_data));
-
- if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) {
- // Throw a TypeError. Use the js_context of the calling javascript
- // function (passed as a parameter), such that the generated code is
- // js_context independent.
- BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
- nullptr, 0);
- TerminateThrow(effect(), control());
- return;
- }
-
- const int args_count = wasm_count + 1; // +1 for wasm_code.
- base::SmallVector<Node*, 16> args(args_count);
- base::SmallVector<Node*, 1> rets(rets_count);
-
- // Convert JS parameters to wasm numbers.
- for (int i = 0; i < wasm_count; ++i) {
- Node* param = Param(i + 1);
- Node* wasm_param = FromJS(param, js_context, sig_->GetParam(i));
- args[i + 1] = wasm_param;
- }
-
+ Node* BuildCallAndReturn(bool is_import, Node* js_context,
+ Node* function_data,
+ base::SmallVector<Node*, 16> args) {
// Set the ThreadInWasm flag before we do the actual call.
BuildModifyThreadInWasmFlag(true);
+ const int rets_count = static_cast<int>(sig_->return_count());
+ base::SmallVector<Node*, 1> rets(rets_count);
+
if (is_import) {
// Call to an imported function.
// Load function index from {WasmExportedFunctionData}.
@@ -6373,7 +6386,147 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
STORE_FIXED_ARRAY_SLOT_ANY(fixed_array, i, value);
}
}
- Return(jsval);
+ return jsval;
+ }
+
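+  // The fast transformation only handles i32, f32 and f64 parameters, i.e.
+  // the types that can be produced directly from a Smi.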
+ bool QualifiesForFastTransform(const wasm::FunctionSig*) {
+ const int wasm_count = static_cast<int>(sig_->parameter_count());
+ for (int i = 0; i < wasm_count; ++i) {
+ wasm::ValueType type = sig_->GetParam(i);
+ switch (type.kind()) {
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kI64:
+ case wasm::ValueType::kRtt:
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
+ return false;
+ case wasm::ValueType::kI32:
+ case wasm::ValueType::kF32:
+ case wasm::ValueType::kF64:
+ break;
+ }
+ }
+ return true;
+ }
+
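+  // A Smi is identified by its cleared tag bit, so masking with kSmiTagMask
+  // and comparing against zero is sufficient.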
+ Node* IsSmi(Node* input) {
+ return gasm_->Word32Equal(
+ gasm_->Word32And(BuildTruncateIntPtrToInt32(input),
+ gasm_->Int32Constant(kSmiTagMask)),
+ gasm_->Int32Constant(0));
+ }
+
+ Node* CanTransformFast(Node* input, wasm::ValueType type) {
+ switch (type.kind()) {
+ case wasm::ValueType::kI32:
+ case wasm::ValueType::kF64:
+ case wasm::ValueType::kF32:
+ return IsSmi(input);
+ case wasm::ValueType::kRef:
+ case wasm::ValueType::kOptRef:
+ case wasm::ValueType::kI64:
+ case wasm::ValueType::kRtt:
+ case wasm::ValueType::kS128:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
+ case wasm::ValueType::kBottom:
+ case wasm::ValueType::kStmt:
+ UNREACHABLE();
+ break;
+ }
+ }
+
+ void BuildJSToWasmWrapper(bool is_import) {
+ const int wasm_count = static_cast<int>(sig_->parameter_count());
+
+ // Build the start and the JS parameter nodes.
+ SetEffectControl(Start(wasm_count + 5));
+
+ // Create the js_closure and js_context parameters.
+ Node* js_closure =
+ graph()->NewNode(mcgraph()->common()->Parameter(
+ Linkage::kJSCallClosureParamIndex, "%closure"),
+ graph()->start());
+ Node* js_context = graph()->NewNode(
+ mcgraph()->common()->Parameter(
+ Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
+ graph()->start());
+
+ // Create the instance_node node to pass as parameter. It is loaded from
+ // an actual reference to an instance or a placeholder reference,
+ // called {WasmExportedFunction} via the {WasmExportedFunctionData}
+ // structure.
+ Node* function_data = BuildLoadFunctionDataFromExportedFunction(js_closure);
+ instance_node_.set(
+ BuildLoadInstanceFromExportedFunctionData(function_data));
+
+ if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
+ // Throw a TypeError. Use the js_context of the calling javascript
+ // function (passed as a parameter), such that the generated code is
+ // js_context independent.
+ BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, js_context,
+ nullptr, 0);
+ TerminateThrow(effect(), control());
+ return;
+ }
+
+ const int args_count = wasm_count + 1; // +1 for wasm_code.
+
+ // Check whether the signature of the function allows for a fast
+ // transformation (if any params exist that need transformation).
+    // Create a fast transformation path only if it does.
+ bool include_fast_path = wasm_count && QualifiesForFastTransform(sig_);
+
+ // Prepare Param() nodes. Param() nodes can only be created once,
+ // so we need to use the same nodes along all possible transformation paths.
+ base::SmallVector<Node*, 16> params(args_count);
+ for (int i = 0; i < wasm_count; ++i) params[i + 1] = Param(i + 1);
+
+ auto done = gasm_->MakeLabel(MachineRepresentation::kTagged);
+ if (include_fast_path) {
+ auto slow_path = gasm_->MakeDeferredLabel();
+      // Check if the params received at runtime can actually be transformed
+ // using the fast transformation. When a param that cannot be transformed
+ // fast is encountered, skip checking the rest and fall back to the slow
+ // path.
+ for (int i = 0; i < wasm_count; ++i) {
+ gasm_->GotoIfNot(CanTransformFast(params[i + 1], sig_->GetParam(i)),
+ &slow_path);
+ }
+ // Convert JS parameters to wasm numbers using the fast transformation
+ // and build the call.
+ base::SmallVector<Node*, 16> args(args_count);
+ for (int i = 0; i < wasm_count; ++i) {
+ Node* wasm_param = FromJSFast(params[i + 1], sig_->GetParam(i));
+ args[i + 1] = wasm_param;
+ }
+ Node* jsval =
+ BuildCallAndReturn(is_import, js_context, function_data, args);
+ gasm_->Goto(&done, jsval);
+ gasm_->Bind(&slow_path);
+ }
+ // Convert JS parameters to wasm numbers using the default transformation
+ // and build the call.
+ base::SmallVector<Node*, 16> args(args_count);
+ for (int i = 0; i < wasm_count; ++i) {
+ Node* wasm_param = FromJS(params[i + 1], js_context, sig_->GetParam(i));
+ args[i + 1] = wasm_param;
+ }
+ Node* jsval =
+ BuildCallAndReturn(is_import, js_context, function_data, args);
+    // If both the default and the fast transformation paths are present,
+ // get the return value based on the path used.
+ if (include_fast_path) {
+ gasm_->Goto(&done, jsval);
+ gasm_->Bind(&done);
+ Return(done.PhiAt(0));
+ } else {
+ Return(jsval);
+ }
if (ContainsInt64(sig_)) LowerInt64(kCalledFromJS);
}
@@ -6743,7 +6896,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
isolate->factory()->undefined_value()));
// Throw a TypeError if the signature is incompatible with JavaScript.
- if (!wasm::IsJSCompatibleSignature(sig_, enabled_features_)) {
+ if (!wasm::IsJSCompatibleSignature(sig_, module_, enabled_features_)) {
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, context,
nullptr, 0);
TerminateThrow(effect(), control());
@@ -6898,6 +7051,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
}
private:
+ const wasm::WasmModule* module_;
StubCallMode stub_mode_;
SetOncePointer<Node> undefined_value_node_;
SetOncePointer<const Operator> int32_to_heapnumber_operator_;
@@ -6914,8 +7068,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, wasm::WasmEngine* wasm_engine,
- const wasm::FunctionSig* sig, bool is_import,
- const wasm::WasmFeatures& enabled_features) {
+ const wasm::FunctionSig* sig, const wasm::WasmModule* module,
+ bool is_import, const wasm::WasmFeatures& enabled_features) {
//----------------------------------------------------------------------------
// Create the Graph.
//----------------------------------------------------------------------------
@@ -6929,7 +7083,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
InstructionSelector::AlignmentRequirements());
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
StubCallMode::kCallBuiltinPointer,
enabled_features);
builder.BuildJSToWasmWrapper(is_import);
@@ -6956,6 +7110,7 @@ std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
Handle<JSReceiver> callable, const wasm::FunctionSig* expected_sig,
+ const wasm::WasmModule* module,
const wasm::WasmFeatures& enabled_features) {
if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
auto imported_function = Handle<WasmExportedFunction>::cast(callable);
@@ -6985,13 +7140,13 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
}
if (WasmCapiFunction::IsWasmCapiFunction(*callable)) {
auto capi_function = Handle<WasmCapiFunction>::cast(callable);
- if (!capi_function->IsSignatureEqual(expected_sig)) {
+ if (!capi_function->MatchesSignature(expected_sig)) {
return std::make_pair(WasmImportCallKind::kLinkError, callable);
}
return std::make_pair(WasmImportCallKind::kWasmToCapi, callable);
}
// Assuming we are calling to JS, check whether this would be a runtime error.
- if (!wasm::IsJSCompatibleSignature(expected_sig, enabled_features)) {
+ if (!wasm::IsJSCompatibleSignature(expected_sig, module, enabled_features)) {
return std::make_pair(WasmImportCallKind::kRuntimeTypeError, callable);
}
// For JavaScript calls, determine whether the target has an arity match.
@@ -7069,10 +7224,14 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
Compiler::Compile(function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
+#ifndef V8_REVERSE_JSARGS
+ // This optimization is disabled when the arguments are reversed. It will be
+  // subsumed when the arguments adaptor frame is removed.
if (shared->is_safe_to_skip_arguments_adaptor()) {
return std::make_pair(
WasmImportCallKind::kJSFunctionArityMismatchSkipAdaptor, callable);
}
+#endif
return std::make_pair(WasmImportCallKind::kJSFunctionArityMismatch,
callable);
@@ -7216,9 +7375,9 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
SourcePositionTable* source_position_table =
source_positions ? zone.New<SourcePositionTable>(graph) : nullptr;
- WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, source_position_table,
- StubCallMode::kCallWasmRuntimeStub,
- env->enabled_features);
+ WasmWrapperGraphBuilder builder(
+ &zone, mcgraph, sig, env->module, source_position_table,
+ StubCallMode::kCallWasmRuntimeStub, env->enabled_features);
builder.BuildWasmImportCallWrapper(kind, expected_arity);
// Build a name in the form "wasm-to-js-<kind>-<signature>".
@@ -7261,9 +7420,9 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()));
- WasmWrapperGraphBuilder builder(&zone, mcgraph, sig, source_positions,
- StubCallMode::kCallWasmRuntimeStub,
- native_module->enabled_features());
+ WasmWrapperGraphBuilder builder(
+ &zone, mcgraph, sig, native_module->module(), source_positions,
+ StubCallMode::kCallWasmRuntimeStub, native_module->enabled_features());
// Set up the graph start.
int param_count = static_cast<int>(sig->parameter_count()) +
@@ -7297,7 +7456,8 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine* wasm_engine,
}
MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
- const wasm::FunctionSig* sig) {
+ const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@@ -7308,7 +7468,7 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
InstructionSelector::AlignmentRequirements());
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildJSToJSWrapper(isolate);
@@ -7342,7 +7502,8 @@ MaybeHandle<Code> CompileJSToJSWrapper(Isolate* isolate,
return code;
}
-Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig) {
+Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module) {
std::unique_ptr<Zone> zone = std::make_unique<Zone>(
isolate->allocator(), ZONE_NAME, kCompressGraphZone);
Graph* graph = zone->New<Graph>(zone.get());
@@ -7353,7 +7514,7 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, const wasm::FunctionSig* sig) {
InstructionSelector::AlignmentRequirements());
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
- WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr,
+ WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, module, nullptr,
StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildCWasmEntry();
@@ -7417,16 +7578,50 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
return false;
}
- builder.LowerInt64(WasmWrapperGraphBuilder::kCalledFromWasm);
-
+  // Lower SIMD first: i64x2 nodes will be lowered to int64 nodes, and the
+  // int64 lowering will then take care of them.
+ auto sig = CreateMachineSignature(mcgraph->zone(), func_body.sig,
+ WasmGraphBuilder::kCalledFromWasm);
if (builder.has_simd() &&
(!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
- SimdScalarLowering(
- mcgraph, CreateMachineSignature(mcgraph->zone(), func_body.sig,
- WasmGraphBuilder::kCalledFromWasm))
- .LowerGraph();
+ SimdScalarLowering(mcgraph, sig).LowerGraph();
+
+ // SimdScalarLowering changes all v128 to 4 i32, so update the machine
+ // signature for the call to LowerInt64.
+ size_t return_count = 0;
+ size_t param_count = 0;
+ for (auto ret : sig->returns()) {
+ return_count += ret == MachineRepresentation::kSimd128 ? 4 : 1;
+ }
+ for (auto param : sig->parameters()) {
+ param_count += param == MachineRepresentation::kSimd128 ? 4 : 1;
+ }
+
+ Signature<MachineRepresentation>::Builder sig_builder(
+ mcgraph->zone(), return_count, param_count);
+ for (auto ret : sig->returns()) {
+ if (ret == MachineRepresentation::kSimd128) {
+ for (int i = 0; i < 4; ++i) {
+ sig_builder.AddReturn(MachineRepresentation::kWord32);
+ }
+ } else {
+ sig_builder.AddReturn(ret);
+ }
+ }
+ for (auto param : sig->parameters()) {
+ if (param == MachineRepresentation::kSimd128) {
+ for (int i = 0; i < 4; ++i) {
+ sig_builder.AddParam(MachineRepresentation::kWord32);
+ }
+ } else {
+ sig_builder.AddParam(param);
+ }
+ }
+ sig = sig_builder.Build();
}
+ builder.LowerInt64(sig);
+
if (func_index >= FLAG_trace_wasm_ast_start &&
func_index < FLAG_trace_wasm_ast_end) {
PrintRawWasmCode(allocator, func_body, env->module, wasm::kPrintLocals);
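To make the signature-expansion step above easier to follow: after SIMD scalar lowering, every kSimd128 slot in the machine signature is replaced by four kWord32 slots, and every other slot is copied through unchanged. A self-contained sketch of that expansion (using a simplified enum rather than V8's MachineRepresentation, so purely illustrative):

#include <vector>

enum class Rep { kWord32, kWord64, kFloat64, kSimd128 };

// Expand a list of parameter or return representations the same way the
// rebuilt signature above does: one v128 slot becomes four i32 slots.
std::vector<Rep> ExpandSimdToWord32(const std::vector<Rep>& slots) {
  std::vector<Rep> out;
  for (Rep r : slots) {
    if (r == Rep::kSimd128) {
      out.insert(out.end(), 4, Rep::kWord32);
    } else {
      out.push_back(r);
    }
  }
  return out;
}

// Example: {kSimd128, kWord64} expands to {kWord32, kWord32, kWord32, kWord32,
// kWord64}, so the lowered call has five parameter slots instead of two.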
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index c35ca3e844..ab42610239 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -106,6 +106,7 @@ constexpr WasmImportCallKind kDefaultImportCallKind =
// another target, which is why the ultimate target is returned as well.
V8_EXPORT_PRIVATE std::pair<WasmImportCallKind, Handle<JSReceiver>>
ResolveWasmImportCall(Handle<JSReceiver> callable, const wasm::FunctionSig* sig,
+ const wasm::WasmModule* module,
const wasm::WasmFeatures& enabled_features);
// Compiles an import call wrapper, which allows Wasm to call imports.
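The declaration above returns both the import-call kind and the callable itself because, as the surrounding comment notes, resolution may be redirected to another target, and the caller needs that ultimate target. A hedged, self-contained sketch of that return shape (the types below are illustrative stand-ins, not V8's):

#include <memory>
#include <utility>

enum class CallKind { kJSFunction, kWasmToWasm };

struct Callable {
  std::shared_ptr<Callable> wrapped_target;  // non-null if this callable forwards to another
};

// Follow the chain of wrappers and hand back both the classification and the
// final target, mirroring the pair returned by the real ResolveWasmImportCall.
std::pair<CallKind, std::shared_ptr<Callable>> Resolve(std::shared_ptr<Callable> callable) {
  while (callable && callable->wrapped_target) callable = callable->wrapped_target;
  return {CallKind::kJSFunction, callable};
}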
@@ -122,13 +123,14 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::WasmEngine*,
// Returns an OptimizedCompilationJob object for a JS to Wasm wrapper.
std::unique_ptr<OptimizedCompilationJob> NewJSToWasmCompilationJob(
Isolate* isolate, wasm::WasmEngine* wasm_engine,
- const wasm::FunctionSig* sig, bool is_import,
- const wasm::WasmFeatures& enabled_features);
+ const wasm::FunctionSig* sig, const wasm::WasmModule* module,
+ bool is_import, const wasm::WasmFeatures& enabled_features);
// Compiles a stub with JS linkage that serves as an adapter for function
// objects constructed via {WebAssembly.Function}. It performs a round-trip
// simulating a JS-to-Wasm-to-JS coercion of parameter and return values.
-MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*);
+MaybeHandle<Code> CompileJSToJSWrapper(Isolate*, const wasm::FunctionSig*,
+ const wasm::WasmModule* module);
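Call sites of this declaration now also supply the defining module. A minimal, hypothetical call shape under the new signature, assuming isolate, sig, and module are already in scope:

MaybeHandle<Code> wrapper = CompileJSToJSWrapper(isolate, sig, module);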
enum CWasmEntryParameters {
kCodeEntry,
@@ -141,8 +143,8 @@ enum CWasmEntryParameters {
// Compiles a stub with C++ linkage, to be called from Execution::CallWasm,
// which knows how to feed it its parameters.
-V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(Isolate*,
- const wasm::FunctionSig*);
+V8_EXPORT_PRIVATE Handle<Code> CompileCWasmEntry(
+ Isolate*, const wasm::FunctionSig*, const wasm::WasmModule* module);
// Values from the instance object are cached between Wasm-level function calls.
// This struct allows the SSA environment handling this cache to be defined
@@ -359,6 +361,9 @@ class WasmGraphBuilder {
enum CallOrigin { kCalledFromWasm, kCalledFromJS };
+ // Overload for when we want to provide a specific signature, rather than
+ // build one using sig_, for example after scalar lowering.
+ V8_EXPORT_PRIVATE void LowerInt64(Signature<MachineRepresentation>* sig);
V8_EXPORT_PRIVATE void LowerInt64(CallOrigin origin);
V8_EXPORT_PRIVATE void SimdScalarLoweringForTesting();
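The new LowerInt64 overload pairs with the wasm-compiler.cc change above: scalar lowering runs first, the machine signature is rebuilt so each kSimd128 slot becomes four kWord32 slots, and only then is the rewritten signature handed to int64 lowering. The usage pattern, as it appears in that change:

SimdScalarLowering(mcgraph, sig).LowerGraph();
// ... rebuild `sig` so every kSimd128 slot becomes four kWord32 slots ...
builder.LowerInt64(sig);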
@@ -452,7 +457,7 @@ class WasmGraphBuilder {
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
- Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
+ Node* BoundsCheckMem(uint8_t access_size, Node* index, uint64_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
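The {offset} parameter above is widened from uint32_t to uint64_t. For orientation only, a generic bounds check written so the 64-bit offset arithmetic cannot wrap (an illustrative sketch, not V8's implementation):

#include <cstdint>

// Returns true iff [index + offset, index + offset + access_size) lies inside
// a memory of mem_size bytes; the guard keeps the additions from overflowing.
bool AccessInBounds(uint64_t index, uint64_t offset, uint8_t access_size,
                    uint64_t mem_size) {
  if (offset > mem_size || access_size > mem_size - offset) return false;
  return index <= mem_size - offset - access_size;
}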
// Check that the range [start, start + size) is in the range [0, max).
// Also updates *size with the valid range. Returns true if the range is