author     Michaël Zasso <targos@protonmail.com>   2017-03-21 10:16:54 +0100
committer  Michaël Zasso <targos@protonmail.com>   2017-03-25 09:44:10 +0100
commit     c459d8ea5d402c702948c860d9497b2230ff7e8a (patch)
tree       56c282fc4d40e5cb613b47cf7be3ea0526ed5b6f /deps/v8/test/unittests/compiler
parent     e0bc5a7361b1d29c3ed034155fd779ce6f44fb13 (diff)
download   node-new-c459d8ea5d402c702948c860d9497b2230ff7e8a.tar.gz
deps: update V8 to 5.7.492.69
PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
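As a quick sanity check of this update (not part of the patch itself), the embedded V8 version can be printed with v8::V8::GetVersion(), which is part of V8's public API. The sketch below is illustrative only; the file name is hypothetical and it assumes the include path points at deps/v8:

    // check-v8-version.cc -- hypothetical helper, not included in this commit.
    #include <cstdio>
    #include "include/v8.h"

    int main() {
      // V8::GetVersion() returns the version string baked into the library;
      // after this update it should report 5.7.492.69.
      std::printf("V8 version: %s\n", v8::V8::GetVersion());
      return 0;
    }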
Diffstat (limited to 'deps/v8/test/unittests/compiler')
-rw-r--r--  deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc  418
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc  27
-rw-r--r--  deps/v8/test/unittests/compiler/escape-analysis-unittest.cc  26
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.cc  6
-rw-r--r--  deps/v8/test/unittests/compiler/graph-unittest.h  1
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.cc  90
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc  20
-rw-r--r--  deps/v8/test/unittests/compiler/int64-lowering-unittest.cc  25
-rw-r--r--  deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc  29
-rw-r--r--  deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc  31
-rw-r--r--  deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc  78
-rw-r--r--  deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc  6
-rw-r--r--  deps/v8/test/unittests/compiler/load-elimination-unittest.cc  96
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc  45
-rw-r--r--  deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc  164
-rw-r--r--  deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc  203
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.cc  55
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.h  8
-rw-r--r--  deps/v8/test/unittests/compiler/regalloc/OWNERS  5
-rw-r--r--  deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc (renamed from deps/v8/test/unittests/compiler/live-range-unittest.cc)  30
-rw-r--r--  deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc (renamed from deps/v8/test/unittests/compiler/move-optimizer-unittest.cc)  8
-rw-r--r--  deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc (renamed from deps/v8/test/unittests/compiler/register-allocator-unittest.cc)  34
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc  16
-rw-r--r--  deps/v8/test/unittests/compiler/state-values-utils-unittest.cc  90
-rw-r--r--  deps/v8/test/unittests/compiler/typer-unittest.cc  4
25 files changed, 983 insertions, 532 deletions
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
new file mode 100644
index 0000000000..4a531449fb
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -0,0 +1,418 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/bytecode-analysis.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/control-flow-builders.h"
+#include "src/objects-inl.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BytecodeAnalysisTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeAnalysisTest() {}
+ ~BytecodeAnalysisTest() override {}
+
+ static void SetUpTestCase() {
+ old_FLAG_ignition_peephole_ = i::FLAG_ignition_peephole;
+ i::FLAG_ignition_peephole = false;
+
+ old_FLAG_ignition_reo_ = i::FLAG_ignition_reo;
+ i::FLAG_ignition_reo = false;
+
+ TestWithIsolateAndZone::SetUpTestCase();
+ }
+
+ static void TearDownTestCase() {
+ TestWithIsolateAndZone::TearDownTestCase();
+ i::FLAG_ignition_peephole = old_FLAG_ignition_peephole_;
+ i::FLAG_ignition_reo = old_FLAG_ignition_reo_;
+ }
+
+ std::string ToLivenessString(const BytecodeLivenessState* liveness) const {
+ const BitVector& bit_vector = liveness->bit_vector();
+
+ std::string out;
+ out.resize(bit_vector.length());
+ for (int i = 0; i < bit_vector.length(); ++i) {
+ if (bit_vector.Contains(i)) {
+ out[i] = 'L';
+ } else {
+ out[i] = '.';
+ }
+ }
+ return out;
+ }
+
+ void EnsureLivenessMatches(
+ Handle<BytecodeArray> bytecode,
+ const std::vector<std::pair<std::string, std::string>>&
+ expected_liveness) {
+ BytecodeAnalysis analysis(bytecode, zone(), true);
+ analysis.Analyze(BailoutId::None());
+
+ interpreter::BytecodeArrayIterator iterator(bytecode);
+ for (auto liveness : expected_liveness) {
+ std::stringstream ss;
+ ss << std::setw(4) << iterator.current_offset() << " : ";
+ iterator.PrintTo(ss);
+
+ EXPECT_EQ(liveness.first, ToLivenessString(analysis.GetInLivenessFor(
+ iterator.current_offset())))
+ << " at bytecode " << ss.str();
+
+ EXPECT_EQ(liveness.second, ToLivenessString(analysis.GetOutLivenessFor(
+ iterator.current_offset())))
+ << " at bytecode " << ss.str();
+
+ iterator.Advance();
+ }
+
+ EXPECT_TRUE(iterator.done());
+ }
+
+ private:
+ static bool old_FLAG_ignition_peephole_;
+ static bool old_FLAG_ignition_reo_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysisTest);
+};
+
+bool BytecodeAnalysisTest::old_FLAG_ignition_peephole_;
+bool BytecodeAnalysisTest::old_FLAG_ignition_reo_;
+
+TEST_F(BytecodeAnalysisTest, EmptyBlock) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, SimpleLoad) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, StoreThenLoad) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back("...L", "L...");
+
+ builder.LoadNull();
+ expected_liveness.emplace_back("L...", "L...");
+
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, DiamondLoad) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_2(2);
+
+ interpreter::BytecodeLabel ld1_label;
+ interpreter::BytecodeLabel end_label;
+
+ builder.JumpIfTrue(&ld1_label);
+ expected_liveness.emplace_back("LLLL", "LLL.");
+
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L.L.", "..L.");
+
+ builder.Jump(&end_label);
+ expected_liveness.emplace_back("..L.", "..L.");
+
+ builder.Bind(&ld1_label);
+ builder.LoadAccumulatorWithRegister(reg_1);
+ expected_liveness.emplace_back(".LL.", "..L.");
+
+ builder.Bind(&end_label);
+
+ builder.LoadAccumulatorWithRegister(reg_2);
+ expected_liveness.emplace_back("..L.", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, DiamondLookupsAndBinds) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_2(2);
+
+ interpreter::BytecodeLabel ld1_label;
+ interpreter::BytecodeLabel end_label;
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back(".LLL", "LLLL");
+
+ builder.JumpIfTrue(&ld1_label);
+ expected_liveness.emplace_back("LLLL", "LLL.");
+
+ {
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "...L");
+
+ builder.StoreAccumulatorInRegister(reg_2);
+ expected_liveness.emplace_back("...L", "..L.");
+
+ builder.Jump(&end_label);
+ expected_liveness.emplace_back("..L.", "..L.");
+ }
+
+ builder.Bind(&ld1_label);
+ {
+ builder.LoadAccumulatorWithRegister(reg_1);
+ expected_liveness.emplace_back(".LL.", "..L.");
+ }
+
+ builder.Bind(&end_label);
+
+ builder.LoadAccumulatorWithRegister(reg_2);
+ expected_liveness.emplace_back("..L.", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, SimpleLoop) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_2(2);
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back("..LL", "L.LL");
+
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+ {
+ builder.JumpIfTrue(loop_builder.break_labels()->New());
+ expected_liveness.emplace_back("L.LL", "L.L.");
+
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "L..L");
+
+ builder.StoreAccumulatorInRegister(reg_2);
+ expected_liveness.emplace_back("L..L", "L.LL");
+
+ loop_builder.BindContinueTarget();
+ loop_builder.JumpToHeader(0);
+ expected_liveness.emplace_back("L.LL", "L.LL");
+ }
+ loop_builder.EndLoop();
+
+ builder.LoadAccumulatorWithRegister(reg_2);
+ expected_liveness.emplace_back("..L.", "...L");
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, TryCatch) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_context(2);
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back(".LLL", "LLL.");
+
+ interpreter::TryCatchBuilder try_builder(&builder, HandlerTable::CAUGHT);
+ try_builder.BeginTry(reg_context);
+ {
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("LLL.", ".LLL");
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back(".LLL", ".LL.");
+
+ builder.CallRuntime(Runtime::kThrow);
+ expected_liveness.emplace_back(".LL.", ".LLL");
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ // Star can't throw, so doesn't take handler liveness
+ expected_liveness.emplace_back("...L", "...L");
+ }
+ try_builder.EndTry();
+ expected_liveness.emplace_back("...L", "...L");
+
+ // Catch
+ {
+ builder.LoadAccumulatorWithRegister(reg_1);
+ expected_liveness.emplace_back(".L..", "...L");
+ }
+ try_builder.EndCatch();
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+ interpreter::Register reg_2(2);
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back("...L", "L..L");
+
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+ {
+ builder.JumpIfTrue(loop_builder.break_labels()->New());
+ expected_liveness.emplace_back("L..L", "L..L");
+
+ interpreter::BytecodeLabel ld1_label;
+ interpreter::BytecodeLabel end_label;
+ builder.JumpIfTrue(&ld1_label);
+ expected_liveness.emplace_back("L..L", "L..L");
+
+ {
+ builder.Jump(&end_label);
+ expected_liveness.emplace_back("L..L", "L..L");
+ }
+
+ builder.Bind(&ld1_label);
+ {
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("L...", "L..L");
+ }
+
+ builder.Bind(&end_label);
+
+ loop_builder.BindContinueTarget();
+ loop_builder.JumpToHeader(0);
+ expected_liveness.emplace_back("L..L", "L..L");
+ }
+ loop_builder.EndLoop();
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ std::vector<std::pair<std::string, std::string>> expected_liveness;
+
+ interpreter::Register reg_0(0);
+ interpreter::Register reg_1(1);
+
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back(".L.L", "LL..");
+
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+ {
+ builder.LoadAccumulatorWithRegister(reg_0);
+ expected_liveness.emplace_back("LL..", ".L..");
+
+ builder.LoadAccumulatorWithRegister(reg_1);
+ expected_liveness.emplace_back(".L..", ".L.L");
+
+ builder.JumpIfTrue(loop_builder.break_labels()->New());
+ expected_liveness.emplace_back(".L.L", ".L.L");
+
+ interpreter::LoopBuilder inner_loop_builder(&builder);
+ inner_loop_builder.LoopHeader();
+ {
+ builder.StoreAccumulatorInRegister(reg_0);
+ expected_liveness.emplace_back(".L.L", "LL.L");
+
+ builder.JumpIfTrue(inner_loop_builder.break_labels()->New());
+ expected_liveness.emplace_back("LL.L", "LL.L");
+
+ inner_loop_builder.BindContinueTarget();
+ inner_loop_builder.JumpToHeader(1);
+ expected_liveness.emplace_back(".L.L", ".L.L");
+ }
+ inner_loop_builder.EndLoop();
+
+ loop_builder.BindContinueTarget();
+ loop_builder.JumpToHeader(0);
+ expected_liveness.emplace_back("LL..", "LL..");
+ }
+ loop_builder.EndLoop();
+
+ builder.Return();
+ expected_liveness.emplace_back("...L", "....");
+
+ Handle<BytecodeArray> bytecode = builder.ToBytecodeArray(isolate());
+
+ EnsureLivenessMatches(bytecode, expected_liveness);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index f294a30596..ecc3070785 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -374,6 +374,33 @@ TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
IsReturn(vfalse, efalse, if_false)));
}
+TEST_F(CommonOperatorReducerTest, MultiReturnWithPhiAndEffectPhiAndMerge) {
+ Node* cond = Parameter(2);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = graph()->start();
+ Node* vtrue1 = Parameter(0);
+ Node* vtrue2 = Parameter(1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->start();
+ Node* vfalse1 = Parameter(1);
+ Node* vfalse2 = Parameter(0);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue1, vfalse1, merge);
+ Node* phi2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue2, vfalse2, merge);
+
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+ Node* ret =
+ graph()->NewNode(common()->Return(2), zero, phi1, phi2, ephi, merge);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+ StrictMock<MockAdvancedReducerEditor> editor;
+ Reduction const r = Reduce(&editor, ret);
+ // For now a return with multiple return values should not be reduced.
+ ASSERT_TRUE(!r.Changed());
+}
// -----------------------------------------------------------------------------
// Select
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index 9cce5475fd..8ad93eee6a 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -148,11 +148,9 @@ class EscapeAnalysisTest : public TypedGraphTest {
}
FieldAccess FieldAccessAtIndex(int offset) {
- FieldAccess access = {kTaggedBase,
- offset,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
@@ -439,9 +437,12 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
Branch();
Node* ifFalse = IfFalse();
- Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
- Node* state_values2 = graph()->NewNode(common()->StateValues(0));
- Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* state_values1 = graph()->NewNode(
+ common()->StateValues(1, SparseInputMask::Dense()), finish);
+ Node* state_values2 =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
+ Node* state_values3 =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
Node* frame_state = graph()->NewNode(
common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
nullptr),
@@ -479,9 +480,12 @@ TEST_F(EscapeAnalysisTest, DISABLED_DeoptReplacementIdentity) {
Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
Branch();
Node* ifFalse = IfFalse();
- Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
- Node* state_values2 = graph()->NewNode(common()->StateValues(1), finish);
- Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* state_values1 = graph()->NewNode(
+ common()->StateValues(1, SparseInputMask::Dense()), finish);
+ Node* state_values2 = graph()->NewNode(
+ common()->StateValues(1, SparseInputMask::Dense()), finish);
+ Node* state_values3 =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
Node* frame_state = graph()->NewNode(
common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
nullptr),
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index dc2ba7814b..6e48eaf96d 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -80,7 +80,8 @@ Node* GraphTest::UndefinedConstant() {
Node* GraphTest::EmptyFrameState() {
- Node* state_values = graph()->NewNode(common()->StateValues(0));
+ Node* state_values =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
return graph()->NewNode(
common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
nullptr),
@@ -98,6 +99,9 @@ Matcher<Node*> GraphTest::IsTrueConstant() {
return IsHeapConstant(factory()->true_value());
}
+Matcher<Node*> GraphTest::IsNullConstant() {
+ return IsHeapConstant(factory()->null_value());
+}
Matcher<Node*> GraphTest::IsUndefinedConstant() {
return IsHeapConstant(factory()->undefined_value());
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 2542e68a91..8701f1ff6d 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -53,6 +53,7 @@ class GraphTest : public virtual TestWithNativeContext,
}
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
+ Matcher<Node*> IsNullConstant();
Matcher<Node*> IsUndefinedConstant();
CommonOperatorBuilder* common() { return &common_; }
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 5c67a1ece0..7b4150ec0c 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -364,10 +364,13 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
zone(), false, 1, CallDescriptor::kNeedsFrameState);
// Build frame state for the state before the call.
- Node* parameters =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(1));
- Node* locals = m.AddNode(m.common()->TypedStateValues(&empty_types));
- Node* stack = m.AddNode(m.common()->TypedStateValues(&empty_types));
+ Node* parameters = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(1));
+ Node* locals = m.AddNode(
+ m.common()->TypedStateValues(&empty_types, SparseInputMask::Dense()));
+ Node* stack = m.AddNode(
+ m.common()->TypedStateValues(&empty_types, SparseInputMask::Dense()));
Node* context_sentinel = m.Int32Constant(0);
Node* state_node = m.AddNode(
m.common()->FrameState(bailout_id, OutputFrameStateCombine::Push(),
@@ -376,9 +379,9 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
m.UndefinedConstant());
// Build the call.
- Node* args[] = {receiver, m.UndefinedConstant(), m.Int32Constant(1), context};
- Node* call =
- m.CallNWithFrameState(descriptor, function_node, args, state_node);
+ Node* nodes[] = {function_node, receiver, m.UndefinedConstant(),
+ m.Int32Constant(1), context, state_node};
+ Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -419,12 +422,15 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
// Build frame state for the state before the call.
- Node* parameters =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
- Node* locals = m.AddNode(m.common()->TypedStateValues(&float64_type),
- m.Float64Constant(0.5));
- Node* stack = m.AddNode(m.common()->TypedStateValues(&tagged_type),
- m.UndefinedConstant());
+ Node* parameters = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(43));
+ Node* locals = m.AddNode(
+ m.common()->TypedStateValues(&float64_type, SparseInputMask::Dense()),
+ m.Float64Constant(0.5));
+ Node* stack = m.AddNode(
+ m.common()->TypedStateValues(&tagged_type, SparseInputMask::Dense()),
+ m.UndefinedConstant());
Node* context_sentinel = m.Int32Constant(0);
Node* state_node = m.AddNode(
m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
@@ -433,9 +439,9 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
m.UndefinedConstant());
// Build the call.
- Node* args[] = {function_node, receiver, context};
Node* stub_code = m.HeapConstant(callable.code());
- Node* call = m.CallNWithFrameState(descriptor, stub_code, args, state_node);
+ Node* nodes[] = {stub_code, function_node, receiver, context, state_node};
+ Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -477,15 +483,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
// We inserted 0 here.
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate()));
- EXPECT_EQ(MachineType::AnyTagged(),
- desc_before->GetType(0)); // function is always
- // tagged/any.
- EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
- EXPECT_EQ(MachineType::AnyTagged(),
- desc_before->GetType(2)); // context is always
- // tagged/any.
- EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
- EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(4));
// Function.
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(7)));
@@ -521,24 +518,30 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
// Build frame state for the state before the call.
- Node* parameters =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(63));
- Node* locals =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(64));
- Node* stack =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(65));
+ Node* parameters = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(63));
+ Node* locals = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(64));
+ Node* stack = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(65));
Node* frame_state_parent = m.AddNode(
m.common()->FrameState(bailout_id_parent,
OutputFrameStateCombine::Ignore(),
m.GetFrameStateFunctionInfo(1, 1)),
parameters, locals, stack, context, function_node, m.UndefinedConstant());
- Node* parameters2 =
- m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
- Node* locals2 = m.AddNode(m.common()->TypedStateValues(&float64_type),
- m.Float64Constant(0.25));
- Node* stack2 = m.AddNode(m.common()->TypedStateValues(&int32x2_type),
- m.Int32Constant(44), m.Int32Constant(45));
+ Node* parameters2 = m.AddNode(
+ m.common()->TypedStateValues(&int32_type, SparseInputMask::Dense()),
+ m.Int32Constant(43));
+ Node* locals2 = m.AddNode(
+ m.common()->TypedStateValues(&float64_type, SparseInputMask::Dense()),
+ m.Float64Constant(0.25));
+ Node* stack2 = m.AddNode(
+ m.common()->TypedStateValues(&int32x2_type, SparseInputMask::Dense()),
+ m.Int32Constant(44), m.Int32Constant(45));
Node* state_node = m.AddNode(
m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
m.GetFrameStateFunctionInfo(1, 1)),
@@ -546,9 +549,9 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
frame_state_parent);
// Build the call.
- Node* args[] = {function_node, receiver, context2};
Node* stub_code = m.HeapConstant(callable.code());
- Node* call = m.CallNWithFrameState(descriptor, stub_code, args, state_node);
+ Node* nodes[] = {stub_code, function_node, receiver, context2, state_node};
+ Node* call = m.CallNWithFrameState(descriptor, arraysize(nodes), nodes);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -585,31 +588,20 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
EXPECT_EQ(1u, desc_before_outer->locals_count());
EXPECT_EQ(1u, desc_before_outer->stack_count());
// Values from parent environment.
- EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(3)));
- EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(1));
// Context:
EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(4)));
- EXPECT_EQ(MachineType::AnyTagged(), desc_before_outer->GetType(2));
EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(5)));
- EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(3));
EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(6)));
- EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(4));
// Values from the nested frame.
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(2u, desc_before->stack_count());
- EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(8)));
- EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(9)));
- EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(2));
EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(10)));
- EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(11)));
- EXPECT_EQ(MachineType::Int32(), desc_before->GetType(4));
EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(12)));
- EXPECT_EQ(MachineType::Int32(), desc_before->GetType(5));
// Function.
EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 9d17c26a56..ee9f7914a6 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -19,6 +19,11 @@ static const char*
static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
RegisterConfiguration::kMaxFPRegisters)];
+namespace {
+static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
+ 0, 1, 2, 3, 4, 5, 6, 7};
+}
+
static void InitializeRegisterNames() {
char* loc = register_names_;
for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
@@ -81,7 +86,18 @@ int InstructionSequenceTest::GetAllocatableCode(int index,
}
const RegisterConfiguration* InstructionSequenceTest::config() {
- return sequence()->GetRegisterConfigurationForTesting();
+ if (!config_) {
+ config_.reset(new RegisterConfiguration(
+ num_general_registers_, num_double_registers_, num_general_registers_,
+ num_double_registers_, allocatable_codes, allocatable_codes,
+ kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
+ : RegisterConfiguration::COMBINE,
+ general_register_names_,
+ double_register_names_, // float register names
+ double_register_names_,
+ double_register_names_)); // SIMD 128 register names
+ }
+ return config_.get();
}
@@ -89,6 +105,8 @@ InstructionSequence* InstructionSequenceTest::sequence() {
if (sequence_ == nullptr) {
sequence_ = new (zone())
InstructionSequence(isolate(), zone(), &instruction_blocks_);
+ sequence_->SetRegisterConfigurationForTesting(
+ InstructionSequenceTest::config());
}
return sequence_;
}
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 400eafb4dc..83c8d003db 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -570,8 +570,7 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
MachineRepresentation::kFloat64);
Capture<Node*> stack_slot_capture;
- Matcher<Node*> stack_slot_matcher =
- IsStackSlot(MachineRepresentation::kWord64);
+ Matcher<Node*> stack_slot_matcher = IsStackSlot(sizeof(int64_t));
Capture<Node*> store_capture;
Matcher<Node*> store_matcher =
@@ -602,8 +601,7 @@ TEST_F(Int64LoweringTest, I64ReinterpretF64) {
MachineRepresentation::kWord64);
Capture<Node*> stack_slot;
- Matcher<Node*> stack_slot_matcher =
- IsStackSlot(MachineRepresentation::kWord64);
+ Matcher<Node*> stack_slot_matcher = IsStackSlot(sizeof(int64_t));
Capture<Node*> store;
Matcher<Node*> store_matcher = IsStore(
@@ -875,6 +873,25 @@ TEST_F(Int64LoweringTest, EffectPhiLoop) {
LowerGraph(load, MachineRepresentation::kWord64);
}
+
+TEST_F(Int64LoweringTest, LoopCycle) {
+ // New node with two placeholders.
+ Node* compare = graph()->NewNode(machine()->Word64Equal(), Int64Constant(0),
+ Int64Constant(value(0)));
+
+ Node* load = graph()->NewNode(
+ machine()->Load(MachineType::Int64()), Int64Constant(value(1)),
+ Int64Constant(value(2)), graph()->start(),
+ graph()->NewNode(
+ common()->Loop(2), graph()->start(),
+ graph()->NewNode(common()->IfFalse(),
+ graph()->NewNode(common()->Branch(), compare,
+ graph()->start()))));
+
+ NodeProperties::ReplaceValueInput(compare, load, 0);
+
+ LowerGraph(load, MachineRepresentation::kWord64);
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index f4a1192abf..56516c9ed0 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/isolate-inl.h"
+#include "src/type-feedback-vector.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -46,7 +47,8 @@ class JSCreateLoweringTest : public TypedGraphTest {
}
Node* FrameState(Handle<SharedFunctionInfo> shared, Node* outer_frame_state) {
- Node* state_values = graph()->NewNode(common()->StateValues(0));
+ Node* state_values =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
return graph()->NewNode(
common()->FrameState(
BailoutId::None(), OutputFrameStateCombine::Ignore(),
@@ -138,13 +140,26 @@ TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
// JSCreateClosure
TEST_F(JSCreateLoweringTest, JSCreateClosureViaInlinedAllocation) {
+ if (!FLAG_turbo_lower_create_closure) return;
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
Handle<SharedFunctionInfo> shared(isolate()->number_function()->shared());
- Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
- context, effect, control));
+
+ // Create a mock feedback vector. It just has to be an array with an array
+ // in slot 0.
+ Handle<FixedArray> array = isolate()->factory()->NewFixedArray(
+ TypeFeedbackVector::kReservedIndexCount + 1);
+ array->set_map_no_write_barrier(
+ isolate()->heap()->type_feedback_vector_map());
+ Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
+ FeedbackVectorSlot slot(0);
+ vector->Set(slot, *vector);
+ VectorSlotPair pair(vector, slot);
+
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->CreateClosure(shared, pair, NOT_TENURED),
+ context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(JSFunction::kSize),
@@ -160,9 +175,9 @@ TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
Node* const context = Parameter(Type::Any());
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r =
- Reduce(graph()->NewNode(javascript()->CreateFunctionContext(8), closure,
- context, effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateFunctionContext(8, FUNCTION_SCOPE),
+ closure, context, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 780bf65df3..e8bbc33578 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -129,37 +129,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
// -----------------------------------------------------------------------------
-// %_IsRegExp
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsRegExp, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch, if_false;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, IsFalseConstant(),
- IsNumberEqual(IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false)),
- effect, _),
- IsNumberConstant(JS_REGEXP_TYPE)),
- IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsJSReceiver
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 6883052abb..979d146164 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -111,6 +111,25 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNumber) {
EXPECT_THAT(r.replacement(), IsNumberToBoolean(input));
}
+TEST_F(JSTypedLoweringTest, JSToBooleanWithDetectableReceiverOrNull) {
+ Node* input = Parameter(Type::DetectableReceiverOrNull(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsReferenceEqual(input, IsNullConstant())));
+}
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithReceiverOrNullOrUndefined) {
+ Node* input = Parameter(Type::ReceiverOrNullOrUndefined(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ToBoolean(ToBooleanHint::kAny), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsBooleanNot(IsObjectIsUndetectable(input)));
+}
+
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
@@ -251,7 +270,7 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
graph()->NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
lhs, rhs, context, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
+ EXPECT_THAT(r.replacement(), IsReferenceEqual(lhs, rhs));
}
@@ -504,17 +523,15 @@ TEST_F(JSTypedLoweringTest, JSLoadContext) {
static bool kBooleans[] = {false, true};
TRACED_FOREACH(size_t, index, kIndices) {
TRACED_FOREACH(bool, immutable, kBooleans) {
- Reduction const r1 = Reduce(
- graph()->NewNode(javascript()->LoadContext(0, index, immutable),
- context, context, effect));
+ Reduction const r1 = Reduce(graph()->NewNode(
+ javascript()->LoadContext(0, index, immutable), context, effect));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(),
IsLoadField(AccessBuilder::ForContextSlot(index), context,
effect, graph()->start()));
- Reduction const r2 = Reduce(
- graph()->NewNode(javascript()->LoadContext(1, index, immutable),
- context, context, effect));
+ Reduction const r2 = Reduce(graph()->NewNode(
+ javascript()->LoadContext(1, index, immutable), context, effect));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsLoadField(AccessBuilder::ForContextSlot(index),
@@ -540,16 +557,16 @@ TEST_F(JSTypedLoweringTest, JSStoreContext) {
Node* const value = Parameter(type);
Reduction const r1 =
- Reduce(graph()->NewNode(javascript()->StoreContext(0, index), context,
- value, context, effect, control));
+ Reduce(graph()->NewNode(javascript()->StoreContext(0, index), value,
+ context, effect, control));
ASSERT_TRUE(r1.Changed());
EXPECT_THAT(r1.replacement(),
IsStoreField(AccessBuilder::ForContextSlot(index), context,
value, effect, control));
Reduction const r2 =
- Reduce(graph()->NewNode(javascript()->StoreContext(1, index), context,
- value, context, effect, control));
+ Reduce(graph()->NewNode(javascript()->StoreContext(1, index), value,
+ context, effect, control));
ASSERT_TRUE(r2.Changed());
EXPECT_THAT(r2.replacement(),
IsStoreField(AccessBuilder::ForContextSlot(index),
@@ -580,13 +597,12 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
Node* key = Parameter(
Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
- Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
- base, key, vector, context,
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->LoadProperty(feedback), base, key,
+ context, EmptyFrameState(), effect, control));
Matcher<Node*> offset_matcher =
element_size == 1
@@ -622,13 +638,12 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
if (min > max) std::swap(min, max);
Node* key = Parameter(Type::Range(min, max, zone()));
Node* base = HeapConstant(array);
- Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
- base, key, vector, context,
- EmptyFrameState(), effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->LoadProperty(feedback), base, key,
+ context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -660,13 +675,12 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
Node* base = HeapConstant(array);
Node* value =
Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
- Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ Node* node = graph()->NewNode(op, base, key, value, context,
EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
@@ -703,8 +717,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Node* key = Parameter(
Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
- Node* value = Parameter(Type::Any());
- Node* vector = UndefinedConstant();
+ Node* value = Parameter(Type::PlainPrimitive());
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -714,7 +727,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
EmptyFrameState(), effect, control);
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ Node* node = graph()->NewNode(op, base, key, value, context,
EmptyFrameState(), checkpoint, control);
Reduction r = Reduce(node);
@@ -724,10 +737,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
: IsNumberShiftLeft(
key, IsNumberConstant(WhichPowerOf2(element_size)));
- Matcher<Node*> value_matcher =
- IsToNumber(value, context, checkpoint, control);
- Matcher<Node*> effect_matcher = value_matcher;
- Matcher<Node*> control_matcher = IsIfSuccess(value_matcher);
+ Matcher<Node*> value_matcher = IsPlainPrimitiveToNumber(value);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -736,7 +746,7 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
BufferAccess(type),
IsPointerConstant(bit_cast<intptr_t>(&backing_store[0])),
offset_matcher, IsNumberConstant(array->byte_length()->Number()),
- value_matcher, effect_matcher, control_matcher));
+ value_matcher, checkpoint, control));
}
}
}
@@ -759,13 +769,12 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
Node* key = Parameter(Type::Range(min, max, zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(access.type);
- Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ Node* node = graph()->NewNode(op, base, key, value, context,
EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
@@ -788,13 +797,12 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
VectorSlotPair feedback;
Handle<Name> name = factory()->length_string();
Node* const receiver = Parameter(Type::String(), 0);
- Node* const vector = Parameter(Type::Internal(), 1);
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
- vector, context, EmptyFrameState(), effect, control));
+ Reduction const r =
+ Reduce(graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
+ context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
receiver, effect, control));
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index f11d6dff18..5d2ec5fc98 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -28,7 +28,8 @@ class LivenessAnalysisTest : public GraphTest {
jsgraph_(isolate(), graph(), common(), &javascript_, nullptr,
&machine_),
analyzer_(locals_count, false, zone()),
- empty_values_(graph()->NewNode(common()->StateValues(0), 0, nullptr)),
+ empty_values_(graph()->NewNode(
+ common()->StateValues(0, SparseInputMask::Dense()), 0, nullptr)),
next_checkpoint_id_(0),
current_block_(nullptr) {}
@@ -48,7 +49,8 @@ class LivenessAnalysisTest : public GraphTest {
int ast_num = next_checkpoint_id_++;
int first_const = intconst_from_bailout_id(ast_num, locals_count_);
- const Operator* locals_op = common()->StateValues(locals_count_);
+ const Operator* locals_op =
+ common()->StateValues(locals_count_, SparseInputMask::Dense());
ZoneVector<Node*> local_inputs(locals_count_, nullptr, zone());
for (int i = 0; i < locals_count_; i++) {
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
index 81393941bb..8d34fb9699 100644
--- a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
@@ -125,11 +125,9 @@ TEST_F(LoadEliminationTest, LoadFieldAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess const access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -154,11 +152,9 @@ TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
Node* value = Parameter(Type::Any(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -178,17 +174,55 @@ TEST_F(LoadEliminationTest, StoreFieldAndLoadField) {
EXPECT_EQ(value, r.replacement());
}
+TEST_F(LoadEliminationTest, StoreFieldAndKillFields) {
+ Node* object = Parameter(Type::Any(), 0);
+ Node* value = Parameter(Type::Any(), 1);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ FieldAccess access1 = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+  // Offset that is outside the field cache size.
+ FieldAccess access2 = {kTaggedBase, 2048 * kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
+
+ StrictMock<MockAdvancedReducerEditor> editor;
+ LoadElimination load_elimination(&editor, jsgraph(), zone());
+
+ load_elimination.Reduce(graph()->start());
+
+ Node* store1 = effect = graph()->NewNode(simplified()->StoreField(access1),
+ object, value, effect, control);
+ load_elimination.Reduce(store1);
+
+ // Invalidate caches of object.
+ Node* store2 = effect = graph()->NewNode(simplified()->StoreField(access2),
+ object, value, effect, control);
+ load_elimination.Reduce(store2);
+
+ Node* store3 = graph()->NewNode(simplified()->StoreField(access1),
+ object, value, effect, control);
+
+ Reduction r = load_elimination.Reduce(store3);
+
+ // store3 shall not be replaced, since caches were invalidated.
+ EXPECT_EQ(store3, r.replacement());
+}
+
TEST_F(LoadEliminationTest, StoreFieldAndStoreElementAndLoadField) {
Node* object = Parameter(Type::Any(), 0);
Node* value = Parameter(Type::Any(), 1);
Node* index = Parameter(Type::UnsignedSmall(), 2);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -288,11 +322,9 @@ TEST_F(LoadEliminationTest, LoadFieldOnFalseBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess const access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -326,11 +358,9 @@ TEST_F(LoadEliminationTest, LoadFieldOnTrueBranchOfDiamond) {
Node* check = Parameter(Type::Boolean(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess const access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -364,11 +394,9 @@ TEST_F(LoadEliminationTest, LoadFieldWithTypeMismatch) {
Node* value = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Unsigned31(),
- MachineType::AnyTagged(),
+ FieldAccess const access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Unsigned31(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
@@ -422,11 +450,9 @@ TEST_F(LoadEliminationTest, AliasAnalysisForFinishRegion) {
Node* value1 = Parameter(Type::Signed32(), 1);
Node* effect = graph()->start();
Node* control = graph()->start();
- FieldAccess const access = {kTaggedBase,
- kPointerSize,
- MaybeHandle<Name>(),
- Type::Signed32(),
- MachineType::AnyTagged(),
+ FieldAccess const access = {kTaggedBase, kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Signed32(), MachineType::AnyTagged(),
kNoWriteBarrier};
StrictMock<MockAdvancedReducerEditor> editor;
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 1d29d9733f..4f1946c379 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -1183,12 +1183,16 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(MachineRepresentation::kWord32,
- IsInt32LessThan(p0, IsInt32Constant(0)),
- IsInt32Sub(IsInt32Constant(0),
- IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
- IsInt32Constant(mask))),
- IsWord32And(p0, IsInt32Constant(mask))));
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsInt32Sub(IsInt32Constant(0),
+ IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
+ IsInt32Constant(mask))),
+ IsWord32And(p0, IsInt32Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt32LessThan(p0, IsInt32Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt32LessThan(p0, IsInt32Constant(0)),
+ graph()->start())))));
}
TRACED_FORRANGE(int32_t, shift, 1, 31) {
Reduction const r = Reduce(graph()->NewNode(
@@ -1199,12 +1203,16 @@ TEST_F(MachineOperatorReducerTest, Int32ModWithConstant) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(MachineRepresentation::kWord32,
- IsInt32LessThan(p0, IsInt32Constant(0)),
- IsInt32Sub(IsInt32Constant(0),
- IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
- IsInt32Constant(mask))),
- IsWord32And(p0, IsInt32Constant(mask))));
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsInt32Sub(IsInt32Constant(0),
+ IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
+ IsInt32Constant(mask))),
+ IsWord32And(p0, IsInt32Constant(mask)),
+ IsMerge(IsIfTrue(IsBranch(IsInt32LessThan(p0, IsInt32Constant(0)),
+ graph()->start())),
+ IsIfFalse(IsBranch(IsInt32LessThan(p0, IsInt32Constant(0)),
+ graph()->start())))));
}
TRACED_FOREACH(int32_t, divisor, kInt32Values) {
if (divisor == 0 || base::bits::IsPowerOfTwo32(Abs(divisor))) continue;
@@ -2077,8 +2085,19 @@ TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithFloat32Constant) {
// -----------------------------------------------------------------------------
-// Store
+// Float64RoundDown
+TEST_F(MachineOperatorReducerTest, Float64RoundDownWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64RoundDown().placeholder(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Constant(Floor(x)));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Store
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
const StoreRepresentation rep(MachineRepresentation::kWord8, kNoWriteBarrier);
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 1698614760..d1336940a3 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -1270,7 +1270,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
- if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
@@ -1283,23 +1283,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1313,30 +1304,21 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMaddfS, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
- if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ if (!IsMipsArchVariant(kMips32r2)) {
return;
}
{
@@ -1349,23 +1331,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1379,23 +1352,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMaddfD, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1404,83 +1368,59 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const p2 = m.Parameter(2);
- Node* n = nullptr;
-
- if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ if (!IsMipsArchVariant(kMips32r2)) {
return;
}
+ {
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n = nullptr;
- if (IsMipsArchVariant(kMips32r2)) {
n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
- } else if (IsMipsArchVariant(kMips32r6)) {
- n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
- }
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMsubfS, s[0]->arch_opcode());
- }
- ASSERT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const p2 = m.Parameter(2);
- Node* n = nullptr;
-
- if (!IsMipsArchVariant(kMips32r2) && !IsMipsArchVariant(kMips32r6)) {
+ if (!IsMipsArchVariant(kMips32r2)) {
return;
}
+ {
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n = nullptr;
- if (IsMipsArchVariant(kMips32r2)) {
n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
- } else if (IsMipsArchVariant(kMips32r6)) {
- n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
- }
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- if (IsMipsArchVariant(kMips32r2)) {
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_EQ(kMipsMsubfD, s[0]->arch_opcode());
- }
- ASSERT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- if (IsMipsArchVariant(kMips32r2)) {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (IsMipsArchVariant(kMips32r6)) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) {
@@ -1514,6 +1454,18 @@ TEST_F(InstructionSelectorTest, Float64Min) {
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Word32ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsByteSwap32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
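
As a reading aid, and not part of this patch, the arithmetic identities the kMipsMaddS/kMipsMsubS selections above (and their double-precision D variants) rely on are accumulator-plus-product and product-minus-accumulator, with the accumulator passed as the first instruction input; rounding and fusion details of the actual MIPS instructions are ignored in this sketch:

// Sketch only: operand order matches the tests above (acc is InputAt(0)).
float MaddS(float acc, float a, float b) { return acc + a * b; }    // kMipsMaddS
float MsubS(float acc, float a, float b) { return a * b - acc; }    // kMipsMsubS
double MaddD(double acc, double a, double b) { return acc + a * b; }
double MsubD(double acc, double a, double b) { return a * b - acc; }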
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index b0e82e4316..97fd7bfc37 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -1360,14 +1360,13 @@ const MemoryAccessImm kMemoryAccessesImm[] = {
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
-
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
{MachineType::Int8(),
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {MachineType::Int8(),
+ {MachineType::Uint8(),
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
@@ -1377,7 +1376,7 @@ const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {MachineType::Int16(),
+ {MachineType::Uint16(),
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
@@ -1601,11 +1600,9 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
- // kMips64Dadd is expected opcode
- // size more than 16 bits wide
- EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
- EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -1621,13 +1618,11 @@ TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
- ASSERT_EQ(2U, s.size());
- // kMips64Add is expected opcode
- // size more than 16 bits wide
- EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
- EXPECT_EQ(kMode_None, s[0]->addressing_mode());
- EXPECT_EQ(2U, s[0]->InputCount());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1757,6 +1752,9 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
}
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
+ if (kArchVariant != kMips64r2) {
+ return;
+ }
{
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
@@ -1767,23 +1765,14 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
- EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1797,29 +1786,23 @@ TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
- EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MaddfS, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMips64MaddS, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
}
TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
+ if (kArchVariant != kMips64r2) {
+ return;
+ }
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
@@ -1830,23 +1813,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
- EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1860,23 +1834,14 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
- EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MaddfD, s[0]->arch_opcode());
- }
+ EXPECT_EQ(kMips64MaddD, s[0]->arch_opcode());
ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
- EXPECT_FALSE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- }
+ EXPECT_FALSE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
@@ -1885,73 +1850,57 @@ TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
MachineType::Float32(), MachineType::Float32());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const p2 = m.Parameter(2);
- Node* n;
- if (kArchVariant == kMips64r2) {
- n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
- } else if (kArchVariant == kMips64r6) {
- n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
+ if (kArchVariant != kMips64r2) {
+ return;
}
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
+ {
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n;
+ n = m.Float32Sub(m.Float32Mul(p1, p2), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64MsubS, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MsubfS, s[0]->arch_opcode());
- }
- ASSERT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
MachineType::Float64(), MachineType::Float64());
- Node* const p0 = m.Parameter(0);
- Node* const p1 = m.Parameter(1);
- Node* const p2 = m.Parameter(2);
- Node* n;
- if (kArchVariant == kMips64r2) {
- n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
- } else if (kArchVariant == kMips64r6) {
- n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
+ if (kArchVariant != kMips64r2) {
+ return;
}
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- if (kArchVariant == kMips64r2) {
+ {
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* n;
+ n = m.Float64Sub(m.Float64Mul(p1, p2), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
EXPECT_EQ(kMips64MsubD, s[0]->arch_opcode());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_EQ(kMips64MsubfD, s[0]->arch_opcode());
- }
- ASSERT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
- EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- if (kArchVariant == kMips64r2) {
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_FALSE(
UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
- } else if (kArchVariant == kMips64r6) {
- EXPECT_TRUE(
- UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TEST_F(InstructionSelectorTest, Float64Max) {
@@ -2015,6 +1964,30 @@ TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
}
}
+TEST_F(InstructionSelectorTest, Word32ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64ByteSwap32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64ByteSwap64, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
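
For reference, a small sketch (not part of this patch) of the byte-order reversal that the Word32ReverseBytes/Word64ReverseBytes tests above expect kMips64ByteSwap32/kMips64ByteSwap64 to implement:

#include <cstdint>

uint32_t ReverseBytes32(uint32_t x) {
  return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
         ((x & 0x00ff0000u) >> 8) | ((x & 0xff000000u) >> 24);
}

uint64_t ReverseBytes64(uint64_t x) {
  return (static_cast<uint64_t>(ReverseBytes32(static_cast<uint32_t>(x))) << 32) |
         ReverseBytes32(static_cast<uint32_t>(x >> 32));
}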
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 8352691644..8e8ccf0be6 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/objects.h"
using testing::_;
@@ -803,32 +804,6 @@ class IsTailCallMatcher final : public NodeMatcher {
const Matcher<Node*> control_matcher_;
};
-
-class IsReferenceEqualMatcher final : public NodeMatcher {
- public:
- IsReferenceEqualMatcher(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher)
- : NodeMatcher(IrOpcode::kReferenceEqual),
- type_matcher_(type_matcher),
- lhs_matcher_(lhs_matcher),
- rhs_matcher_(rhs_matcher) {}
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- // TODO(bmeurer): The type parameter is currently ignored.
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
- lhs_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
- rhs_matcher_, listener));
- }
-
- private:
- const Matcher<Type*> type_matcher_;
- const Matcher<Node*> lhs_matcher_;
- const Matcher<Node*> rhs_matcher_;
-};
-
class IsSpeculativeBinopMatcher final : public NodeMatcher {
public:
IsSpeculativeBinopMatcher(IrOpcode::Value opcode,
@@ -1364,24 +1339,24 @@ STORE_MATCHER(UnalignedStore)
class IsStackSlotMatcher final : public NodeMatcher {
public:
- explicit IsStackSlotMatcher(const Matcher<MachineRepresentation>& rep_matcher)
- : NodeMatcher(IrOpcode::kStackSlot), rep_matcher_(rep_matcher) {}
+ explicit IsStackSlotMatcher(const Matcher<int>& size_matcher)
+ : NodeMatcher(IrOpcode::kStackSlot), size_matcher_(size_matcher) {}
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
+ *os << " whose size (";
+ size_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineRepresentation>(node),
- "rep", rep_matcher_, listener));
+ PrintMatchAndExplain(OpParameter<int>(node), "size", size_matcher_,
+ listener));
}
private:
- const Matcher<MachineRepresentation> rep_matcher_;
+ const Matcher<int> size_matcher_;
};
class IsToNumberMatcher final : public NodeMatcher {
@@ -2072,13 +2047,6 @@ Matcher<Node*> IsTailCall(
effect_matcher, control_matcher));
}
-Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& lhs_matcher,
- const Matcher<Node*>& rhs_matcher) {
- return MakeMatcher(
- new IsReferenceEqualMatcher(type_matcher, lhs_matcher, rhs_matcher));
-}
-
#define DEFINE_SPECULATIVE_BINOP_MATCHER(opcode) \
Matcher<Node*> Is##opcode(const Matcher<NumberOperationHint>& hint_matcher, \
const Matcher<Node*>& lhs_matcher, \
@@ -2207,8 +2175,8 @@ Matcher<Node*> IsUnalignedStore(
control_matcher));
}
-Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher) {
- return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
+Matcher<Node*> IsStackSlot(const Matcher<int>& size_matcher) {
+ return MakeMatcher(new IsStackSlotMatcher(size_matcher));
}
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
@@ -2281,6 +2249,7 @@ IS_BINOP_MATCHER(NumberAtan2)
IS_BINOP_MATCHER(NumberMax)
IS_BINOP_MATCHER(NumberMin)
IS_BINOP_MATCHER(NumberPow)
+IS_BINOP_MATCHER(ReferenceEqual)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Or)
IS_BINOP_MATCHER(Word32Xor)
@@ -2305,6 +2274,7 @@ IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
+IS_BINOP_MATCHER(Int64Mul)
IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(Float32Equal)
IS_BINOP_MATCHER(Float32LessThan)
@@ -2380,6 +2350,7 @@ IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(PlainPrimitiveToNumber)
IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(ObjectIsUndetectable)
IS_UNOP_MATCHER(StringFromCharCode)
IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index fa5ae02dea..11e2704a74 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -205,8 +205,7 @@ Matcher<Node*> IsTailCall(
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
-Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
- const Matcher<Node*>& lhs_matcher,
+Matcher<Node*> IsReferenceEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
@@ -311,6 +310,7 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsObjectIsReceiver(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsObjectIsSmi(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsObjectIsUndetectable(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -333,7 +333,7 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsStackSlot(const Matcher<MachineRepresentation>& rep_matcher);
+Matcher<Node*> IsStackSlot(const Matcher<int>& size_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
@@ -385,6 +385,8 @@ Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Mul(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsBitcastTaggedToWord(const Matcher<Node*>& input_matcher);
diff --git a/deps/v8/test/unittests/compiler/regalloc/OWNERS b/deps/v8/test/unittests/compiler/regalloc/OWNERS
new file mode 100644
index 0000000000..88646a1206
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/regalloc/OWNERS
@@ -0,0 +1,5 @@
+set noparent
+
+bmeurer@chromium.org
+jarin@chromium.org
+mtrofin@chromium.org
\ No newline at end of file
diff --git a/deps/v8/test/unittests/compiler/live-range-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
index e4fc2ca151..fc7b268b44 100644
--- a/deps/v8/test/unittests/compiler/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "test/unittests/compiler/live-range-builder.h"
#include "test/unittests/test-utils.h"
-
// TODO(mtrofin): would we want to centralize this definition?
#ifdef DEBUG
#define V8_ASSERT_DEBUG_DEATH(statement, regex) \
@@ -29,7 +27,6 @@ class LiveRangeUnitTest : public TestWithZone {
return range->SplitAt(LifetimePosition::FromInt(pos), zone());
}
-
TopLevelLiveRange* Splinter(TopLevelLiveRange* top, int start, int end,
int new_id = 0) {
if (top->splinter() == nullptr) {
@@ -70,7 +67,6 @@ class LiveRangeUnitTest : public TestWithZone {
}
};
-
TEST_F(LiveRangeUnitTest, InvalidConstruction) {
// Build a range manually, because the builder guards against empty cases.
TopLevelLiveRange* range =
@@ -81,31 +77,26 @@ TEST_F(LiveRangeUnitTest, InvalidConstruction) {
".*");
}
-
TEST_F(LiveRangeUnitTest, SplitInvalidStart) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
V8_ASSERT_DEBUG_DEATH(Split(range, 0), ".*");
}
-
TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(InvalidSplitEnd)) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
ASSERT_DEATH_IF_SUPPORTED(Split(range, 1), ".*");
}
-
TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPreStart)) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(1, 2);
ASSERT_DEATH_IF_SUPPORTED(Split(range, 0), ".*");
}
-
TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPostEnd)) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
ASSERT_DEATH_IF_SUPPORTED(Split(range, 2), ".*");
}
-
TEST_F(LiveRangeUnitTest, SplitSingleIntervalNoUsePositions) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 2);
LiveRange* child = Split(range, 1);
@@ -119,7 +110,6 @@ TEST_F(LiveRangeUnitTest, SplitSingleIntervalNoUsePositions) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsBetween) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
@@ -134,7 +124,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsBetween) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsFront) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
@@ -150,7 +139,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsFront) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsAfter) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
@@ -166,7 +154,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsAfter) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositions) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
@@ -184,7 +171,6 @@ TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositions) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositionsAtPos) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
@@ -201,7 +187,6 @@ TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositionsAtPos) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsBetween) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
@@ -218,7 +203,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsBetween) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAtInterval) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(4).Build();
@@ -235,7 +219,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAtInterval) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsFront) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
@@ -252,7 +235,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsFront) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAfter) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
@@ -268,7 +250,6 @@ TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAfter) {
EXPECT_TRUE(RangesMatch(expected_bottom, child));
}
-
TEST_F(LiveRangeUnitTest, SplinterSingleInterval) {
TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 6);
TopLevelLiveRange* splinter = Splinter(range, 3, 5);
@@ -283,7 +264,6 @@ TEST_F(LiveRangeUnitTest, SplinterSingleInterval) {
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
-
TEST_F(LiveRangeUnitTest, MergeSingleInterval) {
TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 6);
TopLevelLiveRange* splinter = Splinter(original, 3, 5);
@@ -296,7 +276,6 @@ TEST_F(LiveRangeUnitTest, MergeSingleInterval) {
EXPECT_TRUE(RangesMatch(result, original));
}
-
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsOutside) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -313,7 +292,6 @@ TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsOutside) {
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
-
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsOutside) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -327,14 +305,12 @@ TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsOutside) {
EXPECT_TRUE(RangesMatch(result, original));
}
-
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsInside) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
V8_ASSERT_DEBUG_DEATH(Splinter(range, 3, 5), ".*");
}
-
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsLeft) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -350,7 +326,6 @@ TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsLeft) {
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
-
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsLeft) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -363,7 +338,6 @@ TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsLeft) {
EXPECT_TRUE(RangesMatch(result, original));
}
-
TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsRight) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -379,7 +353,6 @@ TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsRight) {
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
-
TEST_F(LiveRangeUnitTest, SplinterMergeMultipleTimes) {
TopLevelLiveRange* range =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 10).Add(12, 16).Build();
@@ -398,7 +371,6 @@ TEST_F(LiveRangeUnitTest, SplinterMergeMultipleTimes) {
EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
}
-
TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsRight) {
TopLevelLiveRange* original =
TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
@@ -413,7 +385,6 @@ TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsRight) {
EXPECT_TRUE(RangesMatch(result, original));
}
-
TEST_F(LiveRangeUnitTest, MergeAfterSplitting) {
TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 8);
TopLevelLiveRange* splinter = Splinter(original, 4, 6);
@@ -430,7 +401,6 @@ TEST_F(LiveRangeUnitTest, MergeAfterSplitting) {
EXPECT_TRUE(RangesMatch(result, original));
}
-
TEST_F(LiveRangeUnitTest, IDGeneration) {
TopLevelLiveRange* vreg = TestRangeBuilder(zone()).Id(2).Build(0, 100);
EXPECT_EQ(2, vreg->vreg());
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
index 71571488e1..d61543a252 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/move-optimizer-unittest.cc
@@ -98,7 +98,6 @@ class MoveOptimizerTest : public InstructionSequenceTest {
}
};
-
TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
auto first_instr = EmitNop();
@@ -127,7 +126,6 @@ TEST_F(MoveOptimizerTest, RemovesRedundant) {
CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
}
-
TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
int index1 = GetAllocatableCode(0);
int index2 = GetAllocatableCode(1);
@@ -167,7 +165,6 @@ TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
CHECK(Contains(move, FPReg(f32_1, kFloat32), ExplicitFPReg(f32_2, kFloat32)));
}
-
TEST_F(MoveOptimizerTest, SplitsConstants) {
StartBlock();
EndBlock(Last());
@@ -191,7 +188,6 @@ TEST_F(MoveOptimizerTest, SplitsConstants) {
CHECK(Contains(move, Reg(0), Slot(2)));
}
-
TEST_F(MoveOptimizerTest, SimpleMerge) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
@@ -227,7 +223,6 @@ TEST_F(MoveOptimizerTest, SimpleMerge) {
CHECK(Contains(move, FPReg(kF32_1, kFloat32), FPReg(kF32_2, kFloat32)));
}
-
TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
@@ -279,7 +274,6 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
CHECK(Contains(move, FPReg(kF32_2, kFloat32), FPReg(kF32_1, kFloat32)));
}
-
TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
StartBlock();
int const_index = 1;
@@ -317,7 +311,6 @@ TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
CHECK_EQ(1, assignment);
}
-
TEST_F(MoveOptimizerTest, SubsetMovesMerge) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
@@ -354,7 +347,6 @@ TEST_F(MoveOptimizerTest, SubsetMovesMerge) {
CHECK(Contains(b2_move, Reg(4), Reg(5)));
}
-
TEST_F(MoveOptimizerTest, GapConflictSubsetMovesDoNotMerge) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
index 0533ee7406..4ae2ee9acd 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/register-allocator-unittest.cc
@@ -9,7 +9,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-
namespace {
// We can't just use the size of the moves collection, because of
@@ -23,7 +22,6 @@ int GetMoveCount(const ParallelMove& moves) {
return move_count;
}
-
bool AreOperandsOfSameType(
const AllocatedOperand& op,
const InstructionSequenceTest::TestOperand& test_op) {
@@ -36,7 +34,6 @@ bool AreOperandsOfSameType(
(op.IsStackSlot() && !test_op_is_reg);
}
-
bool AllocatedOperandMatches(
const AllocatedOperand& op,
const InstructionSequenceTest::TestOperand& test_op) {
@@ -46,7 +43,6 @@ bool AllocatedOperandMatches(
test_op.value_ == InstructionSequenceTest::kNoValue);
}
-
int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
const InstructionSequence* sequence) {
const ParallelMove* moves =
@@ -55,7 +51,6 @@ int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
return GetMoveCount(*moves);
}
-
bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
const InstructionSequence* sequence,
const InstructionSequenceTest::TestOperand& src,
@@ -79,7 +74,6 @@ bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
} // namespace
-
class RegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
@@ -88,7 +82,6 @@ class RegisterAllocatorTest : public InstructionSequenceTest {
}
};
-
TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
// return p0 + p1;
StartBlock();
@@ -136,7 +129,6 @@ TEST_F(RegisterAllocatorTest, SimpleLoop) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SimpleBranch) {
// return i ? K1 : K2
StartBlock();
@@ -154,7 +146,6 @@ TEST_F(RegisterAllocatorTest, SimpleBranch) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SimpleDiamond) {
// return p0 ? p0 : p0
StartBlock();
@@ -174,7 +165,6 @@ TEST_F(RegisterAllocatorTest, SimpleDiamond) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
// return i ? K1 : K2
StartBlock();
@@ -195,7 +185,6 @@ TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
const int kPhis = kDefaultNRegs * 2;
@@ -227,7 +216,6 @@ TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
const int kPhis = kDefaultNRegs * 2;
@@ -266,7 +254,6 @@ TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
const size_t kNumRegs = 3;
const size_t kParams = kNumRegs + 1;
@@ -315,17 +302,16 @@ TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SpillPhi) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
- auto left = Define(Reg(GetAllocatableCode(0)));
+ auto left = Define(Reg(0));
EndBlock(Jump(2));
StartBlock();
- auto right = Define(Reg(GetAllocatableCode(0)));
+ auto right = Define(Reg(0));
EndBlock();
StartBlock();
@@ -337,7 +323,6 @@ TEST_F(RegisterAllocatorTest, SpillPhi) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
StartBlock();
VReg constants[kDefaultNRegs];
@@ -357,7 +342,6 @@ TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
const int kNumRegs = 6;
SetNumRegs(kNumRegs, kNumRegs);
@@ -383,7 +367,6 @@ TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
const int kNumRegs = 6;
SetNumRegs(kNumRegs, kNumRegs);
@@ -408,7 +391,6 @@ TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
// Outer diamond.
StartBlock();
@@ -455,7 +437,6 @@ TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
// Outer diamond.
StartBlock();
@@ -502,7 +483,6 @@ TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
StartBlock();
@@ -529,7 +509,6 @@ TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
StartBlock();
auto p_0 = Parameter(Reg(1));
@@ -539,7 +518,6 @@ TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
StartBlock();
// Fill registers.
@@ -574,7 +552,6 @@ TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
StartBlock();
auto x = EmitOI(Reg(0));
@@ -595,7 +572,6 @@ TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
StartBlock();
auto x = EmitOI(Reg(0));
@@ -616,7 +592,6 @@ TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
Allocate();
}
-
TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
StartBlock(); // B0
auto var = EmitOI(Reg(0));
@@ -655,7 +630,6 @@ TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
sequence(), Reg(0), Slot(0)));
}
-
TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
if (!FLAG_turbo_preprocess_ranges) return;
@@ -706,12 +680,10 @@ TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
Slot(var3_slot), Reg()));
-
EXPECT_EQ(0,
GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
}
-
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
@@ -738,7 +710,6 @@ class SlotConstraintTest : public RegisterAllocatorTest,
} // namespace
-
#if GTEST_HAS_COMBINE
TEST_P(SlotConstraintTest, SlotConstraint) {
@@ -785,7 +756,6 @@ TEST_P(SlotConstraintTest, SlotConstraint) {
Allocate();
}
-
INSTANTIATE_TEST_CASE_P(
RegisterAllocatorTest, SlotConstraintTest,
::testing::Combine(::testing::ValuesIn(kParameterTypes),
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 6f37609f3a..f5ae91d7c1 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -336,6 +336,22 @@ TEST_F(SimplifiedOperatorReducerTest, TruncateTaggedToWord32WithConstant) {
}
// -----------------------------------------------------------------------------
+// CheckedFloat64ToInt32
+
+TEST_F(SimplifiedOperatorReducerTest, CheckedFloat64ToInt32WithConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ TRACED_FOREACH(int32_t, n, kInt32Values) {
+ Reduction r = Reduce(
+ graph()->NewNode(simplified()->CheckedFloat64ToInt32(
+ CheckForMinusZeroMode::kDontCheckForMinusZero),
+ Float64Constant(n), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(n));
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckHeapObject
TEST_F(SimplifiedOperatorReducerTest, CheckHeapObjectWithChangeBitToTagged) {
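
The new CheckedFloat64ToInt32 test above folds a checked conversion of a float64 constant into an Int32Constant. A hedged sketch of the folding condition it exercises follows; the minus-zero handling is an assumption about the CheckForMinusZeroMode parameter, since the test only uses kDontCheckForMinusZero with exact int32 values:

#include <cmath>
#include <cstdint>
#include <limits>

// Returns true when a constant double can be folded to Int32Constant
// without losing the deoptimization check.
bool CanFoldCheckedFloat64ToInt32(double v, bool check_minus_zero) {
  if (std::isnan(v)) return false;
  if (v < static_cast<double>(std::numeric_limits<int32_t>::min()) ||
      v > static_cast<double>(std::numeric_limits<int32_t>::max())) {
    return false;
  }
  if (static_cast<double>(static_cast<int32_t>(v)) != v) return false;  // not an exact int32
  if (check_minus_zero && v == 0.0 && std::signbit(v)) return false;    // -0.0 would need a deopt
  return true;
}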
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
index 311b90a8d1..388dd56247 100644
--- a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/state-values-utils.h"
+#include "src/bit-vector.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "test/unittests/test-utils.h"
@@ -18,8 +19,9 @@ class StateValuesIteratorTest : public GraphTest {
Node* StateValuesFromVector(NodeVector* nodes) {
int count = static_cast<int>(nodes->size());
- return graph()->NewNode(common()->StateValues(count), count,
- count == 0 ? nullptr : &(nodes->front()));
+ return graph()->NewNode(
+ common()->StateValues(count, SparseInputMask::Dense()), count,
+ count == 0 ? nullptr : &(nodes->front()));
}
};
@@ -107,7 +109,8 @@ TEST_F(StateValuesIteratorTest, TreeFromVector) {
// Build the tree.
StateValuesCache builder(&jsgraph);
Node* values_node = builder.GetNodeForValues(
- inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ nullptr);
// Check the tree contents with vector.
int i = 0;
@@ -119,6 +122,46 @@ TEST_F(StateValuesIteratorTest, TreeFromVector) {
}
}
+TEST_F(StateValuesIteratorTest, TreeFromVectorWithLiveness) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+ // Generate the input liveness.
+ BitVector liveness(count, zone());
+ for (int i = 0; i < count; i++) {
+ if (i % 3 == 0) {
+ liveness.Add(i);
+ }
+ }
+
+ // Build the tree.
+ StateValuesCache builder(&jsgraph);
+ Node* values_node = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ &liveness);
+
+ // Check the tree contents with vector.
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(values_node)) {
+ if (liveness.Contains(i)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ } else {
+ EXPECT_EQ(node.node, nullptr);
+ }
+ i++;
+ }
+ EXPECT_EQ(inputs.size(), static_cast<size_t>(i));
+ }
+}
TEST_F(StateValuesIteratorTest, BuildTreeIdentical) {
int sizes[] = {0, 1, 2, 100, 5000, 30000};
@@ -137,9 +180,46 @@ TEST_F(StateValuesIteratorTest, BuildTreeIdentical) {
// Build two trees from the same data.
StateValuesCache builder(&jsgraph);
Node* node1 = builder.GetNodeForValues(
- inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ nullptr);
+ Node* node2 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ nullptr);
+
+ // The trees should be equal since the data was the same.
+ EXPECT_EQ(node1, node2);
+ }
+}
+
+TEST_F(StateValuesIteratorTest, BuildTreeWithLivenessIdentical) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+ // Generate the input liveness.
+ BitVector liveness(count, zone());
+ for (int i = 0; i < count; i++) {
+ if (i % 3 == 0) {
+ liveness.Add(i);
+ }
+ }
+
+ // Build two trees from the same data.
+ StateValuesCache builder(&jsgraph);
+ Node* node1 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ &liveness);
Node* node2 = builder.GetNodeForValues(
- inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size(),
+ &liveness);
// The trees should be equal since the data was the same.
EXPECT_EQ(node1, node2);
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 3ef436e5c8..c9b5c9db34 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -223,8 +223,8 @@ class TyperTest : public TypedGraphTest {
namespace {
-int32_t shift_left(int32_t x, int32_t y) { return x << y; }
-int32_t shift_right(int32_t x, int32_t y) { return x >> y; }
+int32_t shift_left(int32_t x, int32_t y) { return x << (y & 0x1f); }
+int32_t shift_right(int32_t x, int32_t y) { return x >> (y & 0x1f); }
int32_t bit_or(int32_t x, int32_t y) { return x | y; }
int32_t bit_and(int32_t x, int32_t y) { return x & y; }
int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }