summaryrefslogtreecommitdiff
path: root/deps/v8/test/unittests
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/unittests')
-rw-r--r--deps/v8/test/unittests/BUILD.gn4
-rw-r--r--deps/v8/test/unittests/api/resource-constraints-unittest.cc56
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc5
-rw-r--r--deps/v8/test/unittests/base/vlq-base64-unittest.cc137
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc119
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/branch-elimination-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc966
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc25
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h4
-rw-r--r--deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc17
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h5
-rw-r--r--deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc63
-rw-r--r--deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc13
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc9
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc1
-rw-r--r--deps/v8/test/unittests/compiler/typed-optimization-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/heap/heap-controller-unittest.cc59
-rw-r--r--deps/v8/test/unittests/heap/heap-unittest.cc73
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc8
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc10
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc18
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc43
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h4
-rw-r--r--deps/v8/test/unittests/logging/counters-unittest.cc48
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc63
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc4
-rw-r--r--deps/v8/test/unittests/torque/earley-parser-unittest.cc2
-rw-r--r--deps/v8/test/unittests/torque/ls-message-unittest.cc85
-rw-r--r--deps/v8/test/unittests/torque/ls-server-data-unittest.cc24
-rw-r--r--deps/v8/test/unittests/torque/torque-unittest.cc108
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc224
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc78
-rw-r--r--deps/v8/test/unittests/wasm/wasm-text-unittest.cc121
53 files changed, 1542 insertions, 930 deletions
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 39af3fbc06..87013f9fbc 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -50,6 +50,7 @@ v8_source_set("unittests_sources") {
"api/interceptor-unittest.cc",
"api/isolate-unittest.cc",
"api/remote-object-unittest.cc",
+ "api/resource-constraints-unittest.cc",
"api/v8-object-unittest.cc",
"asmjs/asm-scanner-unittest.cc",
"asmjs/asm-types-unittest.cc",
@@ -76,6 +77,7 @@ v8_source_set("unittests_sources") {
"base/template-utils-unittest.cc",
"base/threaded-list-unittest.cc",
"base/utils/random-number-generator-unittest.cc",
+ "base/vlq-base64-unittest.cc",
"codegen/code-stub-assembler-unittest.cc",
"codegen/code-stub-assembler-unittest.h",
"codegen/register-configuration-unittest.cc",
@@ -223,6 +225,7 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
+ "wasm/wasm-text-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
@@ -299,6 +302,7 @@ v8_source_set("unittests_sources") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../../third_party/inspector_protocol:encoding_test",
+ "../../third_party/inspector_protocol:bindings_test",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
diff --git a/deps/v8/test/unittests/api/resource-constraints-unittest.cc b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
new file mode 100644
index 0000000000..4c9b7f33dd
--- /dev/null
+++ b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
@@ -0,0 +1,56 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+
+TEST(ResourceConstraints, ConfigureDefaultsFromHeapSizeSmall) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaultsFromHeapSize(1 * MB, 1 * MB);
+ ASSERT_EQ(i::Heap::MinOldGenerationSize(),
+ constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 512 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_young_generation_size_in_bytes());
+}
+
+TEST(ResourceConstraints, ConfigureDefaultsFromHeapSizeLarge) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaultsFromHeapSize(100u * MB, 3000u * MB);
+ ASSERT_EQ(3000u * MB - 3 * 8192 * pm * KB,
+ constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 8192 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(100u * MB - 3 * 512 * pm * KB,
+ constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 512 * pm * KB,
+ constraints.initial_young_generation_size_in_bytes());
+}
+
+TEST(ResourceConstraints, ConfigureDefaults) {
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ v8::ResourceConstraints constraints;
+ constraints.ConfigureDefaults(2048u * MB, 0u);
+ ASSERT_EQ(512u * pm * MB, constraints.max_old_generation_size_in_bytes());
+ ASSERT_EQ(3 * 4096 * pm * KB,
+ constraints.max_young_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_old_generation_size_in_bytes());
+ ASSERT_EQ(0u, constraints.initial_young_generation_size_in_bytes());
+}
+
+} // namespace v8
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index 6e5c9131fd..eb72d45263 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -155,6 +155,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnPlatformObject) {
caller_context->Global()->Set(caller_context, object_key, object).ToChecked();
const char script[] =
"function f() { object.property; object.property = 0; } "
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); "
"%OptimizeFunctionOnNextCall(f); "
"f();";
@@ -210,6 +211,7 @@ TEST_F(LapContextTest, CurrentContextInLazyAccessorOnInterface) {
.ToChecked();
const char script[] =
"function f() { Interface.property; Interface.property = 0; } "
+ "%PrepareFunctionForOptimization(f);"
"f(); f(); "
"%OptimizeFunctionOnNextCall(f); "
"f();";
diff --git a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
index 6099cd5a59..420b236432 100644
--- a/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
+++ b/deps/v8/test/unittests/base/utils/random-number-generator-unittest.cc
@@ -37,8 +37,9 @@ static void CheckSlowSample(const std::vector<uint64_t>& sample, uint64_t max,
}
}
-static void TestNextSample(RandomNumberGenerator& rng, uint64_t max,
- size_t size, bool slow = false) {
+static void TestNextSample(
+ RandomNumberGenerator& rng, // NOLINT(runtime/references)
+ uint64_t max, size_t size, bool slow = false) {
std::vector<uint64_t> sample =
slow ? rng.NextSampleSlow(max, size) : rng.NextSample(max, size);
diff --git a/deps/v8/test/unittests/base/vlq-base64-unittest.cc b/deps/v8/test/unittests/base/vlq-base64-unittest.cc
new file mode 100644
index 0000000000..8abec9b626
--- /dev/null
+++ b/deps/v8/test/unittests/base/vlq-base64-unittest.cc
@@ -0,0 +1,137 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+#include <initializer_list>
+#include <limits>
+
+#include "src/base/vlq-base64.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+TEST(VLQBASE64, charToDigit) {
+ char kSyms[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+ for (int i = 0; i < 256; ++i) {
+ char* pos = strchr(kSyms, static_cast<char>(i));
+ int8_t expected = i == 0 || pos == nullptr ? -1 : pos - kSyms;
+ EXPECT_EQ(expected, charToDigitDecodeForTesting(static_cast<uint8_t>(i)));
+ }
+}
+
+struct ExpectedVLQBase64Result {
+ size_t pos;
+ int32_t result;
+};
+
+void TestVLQBase64Decode(
+ const char* str,
+ std::initializer_list<ExpectedVLQBase64Result> expected_results) {
+ size_t pos = 0;
+ for (const auto& expect : expected_results) {
+ int32_t result = VLQBase64Decode(str, strlen(str), &pos);
+ EXPECT_EQ(expect.result, result);
+ EXPECT_EQ(expect.pos, pos);
+ }
+}
+
+TEST(VLQBASE64, DecodeOneSegment) {
+ TestVLQBase64Decode("", {{0, std::numeric_limits<int32_t>::min()}});
+
+ // Unsupported symbol.
+ TestVLQBase64Decode("*", {{0, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("&", {{0, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("kt:", {{2, std::numeric_limits<int32_t>::min()}});
+ TestVLQBase64Decode("k^C", {{1, std::numeric_limits<int32_t>::min()}});
+
+  // Incomplete string.
+ TestVLQBase64Decode("kth4yp", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // Interpretable strings.
+ TestVLQBase64Decode("A", {{1, 0}});
+ TestVLQBase64Decode("C", {{1, 1}});
+ TestVLQBase64Decode("Y", {{1, 12}});
+ TestVLQBase64Decode("2H", {{2, 123}});
+ TestVLQBase64Decode("ktC", {{3, 1234}});
+ TestVLQBase64Decode("yjY", {{3, 12345}});
+ TestVLQBase64Decode("gkxH", {{4, 123456}});
+ TestVLQBase64Decode("uorrC", {{5, 1234567}});
+ TestVLQBase64Decode("80wxX", {{5, 12345678}});
+ TestVLQBase64Decode("qxmvrH", {{6, 123456789}});
+ TestVLQBase64Decode("kth4ypC", {{7, 1234567890}});
+ TestVLQBase64Decode("+/////D", {{7, std::numeric_limits<int32_t>::max()}});
+ TestVLQBase64Decode("D", {{1, -1}});
+ TestVLQBase64Decode("Z", {{1, -12}});
+ TestVLQBase64Decode("3H", {{2, -123}});
+ TestVLQBase64Decode("ltC", {{3, -1234}});
+ TestVLQBase64Decode("zjY", {{3, -12345}});
+ TestVLQBase64Decode("hkxH", {{4, -123456}});
+ TestVLQBase64Decode("vorrC", {{5, -1234567}});
+ TestVLQBase64Decode("90wxX", {{5, -12345678}});
+ TestVLQBase64Decode("rxmvrH", {{6, -123456789}});
+ TestVLQBase64Decode("lth4ypC", {{7, -1234567890}});
+ TestVLQBase64Decode("//////D", {{7, -std::numeric_limits<int32_t>::max()}});
+
+ // An overflowed value 12345678901 (0x2DFDC1C35).
+ TestVLQBase64Decode("qjuw7/2A", {{6, std::numeric_limits<int32_t>::min()}});
+
+  // An overflowed value 123456789012 (0x1CBE991A14).
+ TestVLQBase64Decode("ohtkz+lH", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value 4294967296 (0x100000000).
+ TestVLQBase64Decode("ggggggE", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value -12345678901, |value| = (0x2DFDC1C35).
+ TestVLQBase64Decode("rjuw7/2A", {{6, std::numeric_limits<int32_t>::min()}});
+
+  // An overflowed value -123456789012, |value| = (0x1CBE991A14).
+ TestVLQBase64Decode("phtkz+lH", {{6, std::numeric_limits<int32_t>::min()}});
+
+ // An overflowed value -4294967296, |value| = (0x100000000).
+ TestVLQBase64Decode("hgggggE", {{6, std::numeric_limits<int32_t>::min()}});
+}
+
+TEST(VLQBASE64, DecodeTwoSegment) {
+ TestVLQBase64Decode("AA", {{1, 0}, {2, 0}});
+ TestVLQBase64Decode("KA", {{1, 5}, {2, 0}});
+ TestVLQBase64Decode("AQ", {{1, 0}, {2, 8}});
+ TestVLQBase64Decode("MG", {{1, 6}, {2, 3}});
+ TestVLQBase64Decode("a4E", {{1, 13}, {3, 76}});
+ TestVLQBase64Decode("4GyO", {{2, 108}, {4, 233}});
+ TestVLQBase64Decode("ggEqnD", {{3, 2048}, {6, 1653}});
+ TestVLQBase64Decode("g2/D0ilF", {{4, 65376}, {8, 84522}});
+ TestVLQBase64Decode("ss6gBy0m3B", {{5, 537798}, {10, 904521}});
+ TestVLQBase64Decode("LA", {{1, -5}, {2, 0}});
+ TestVLQBase64Decode("AR", {{1, 0}, {2, -8}});
+ TestVLQBase64Decode("NH", {{1, -6}, {2, -3}});
+ TestVLQBase64Decode("b5E", {{1, -13}, {3, -76}});
+ TestVLQBase64Decode("5GzO", {{2, -108}, {4, -233}});
+ TestVLQBase64Decode("hgErnD", {{3, -2048}, {6, -1653}});
+ TestVLQBase64Decode("h2/D1ilF", {{4, -65376}, {8, -84522}});
+ TestVLQBase64Decode("ts6gBz0m3B", {{5, -537798}, {10, -904521}});
+ TestVLQBase64Decode("4GzO", {{2, 108}, {4, -233}});
+ TestVLQBase64Decode("ggErnD", {{3, 2048}, {6, -1653}});
+ TestVLQBase64Decode("g2/D1ilF", {{4, 65376}, {8, -84522}});
+ TestVLQBase64Decode("ss6gBz0m3B", {{5, 537798}, {10, -904521}});
+ TestVLQBase64Decode("5GyO", {{2, -108}, {4, 233}});
+ TestVLQBase64Decode("hgEqnD", {{3, -2048}, {6, 1653}});
+ TestVLQBase64Decode("h2/D0ilF", {{4, -65376}, {8, 84522}});
+ TestVLQBase64Decode("ts6gBy0m3B", {{5, -537798}, {10, 904521}});
+}
+
+TEST(VLQBASE64, DecodeFourSegment) {
+ TestVLQBase64Decode("AAAA", {{1, 0}, {2, 0}, {3, 0}, {4, 0}});
+ TestVLQBase64Decode("QADA", {{1, 8}, {2, 0}, {3, -1}, {4, 0}});
+ TestVLQBase64Decode("ECQY", {{1, 2}, {2, 1}, {3, 8}, {4, 12}});
+ TestVLQBase64Decode("goGguCioPk9I",
+ {{3, 3200}, {6, 1248}, {9, 7809}, {12, 4562}});
+ TestVLQBase64Decode("6/BACA", {{3, 1021}, {4, 0}, {5, 1}, {6, 0}});
+ TestVLQBase64Decode("urCAQA", {{3, 1207}, {4, 0}, {5, 8}, {6, 0}});
+ TestVLQBase64Decode("sDACA", {{2, 54}, {3, 0}, {4, 1}, {5, 0}});
+}
+} // namespace base
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index fb6dc163b2..bc74e6fe19 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -94,7 +94,7 @@ class CompilerDispatcherTest : public TestWithNativeContext {
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
- shared->FunctionLiteralId(isolate), nullptr);
+ shared->function_literal_id(), nullptr);
return dispatcher->Enqueue(outer_parse_info.get(), function_name,
function_literal);
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 78663c52a5..867f89abfd 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -41,8 +41,9 @@ std::ostream& operator<<(std::ostream& os, const Shift& shift) {
// Helper to build Int32Constant or Int64Constant depending on the given
// machine type.
-Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
- int64_t value) {
+Node* BuildConstant(
+ InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
+ MachineType type, int64_t value) {
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m.Int32Constant(static_cast<int32_t>(value));
@@ -58,7 +59,6 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
return NULL;
}
-
// ARM64 logical instructions.
const MachInst2 kLogicalInstructions[] = {
{&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
@@ -1277,7 +1277,7 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1LL << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
@@ -1298,7 +1298,7 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1LL << bit;
+ uint64_t mask = uint64_t{1} << bit;
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
@@ -2196,7 +2196,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// x * (2^k + 1) -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
- m.Return(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)));
+ m.Return(
+ m.Int64Mul(m.Parameter(0), m.Int64Constant((int64_t{1} << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2209,7 +2210,8 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
// (2^k + 1) * x -> x + (x << k)
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
- m.Return(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)));
+ m.Return(
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2223,9 +2225,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)),
- m.Parameter(1)));
+ m.Return(m.Int64Add(
+ m.Int64Mul(m.Parameter(0), m.Int64Constant((int64_t{1} << k) + 1)),
+ m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2240,9 +2242,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)),
- m.Parameter(1)));
+ m.Return(m.Int64Add(
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(0)),
+ m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2257,9 +2259,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Parameter(0),
- m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ m.Return(m.Int64Add(
+ m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((int64_t{1} << k) + 1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2274,9 +2276,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Add(m.Parameter(0),
- m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ m.Return(m.Int64Add(
+ m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2291,9 +2293,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Sub(m.Parameter(0),
- m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ m.Return(m.Int64Sub(
+ m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((int64_t{1} << k) + 1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2308,9 +2310,9 @@ TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
TRACED_FORRANGE(int64_t, k, 1, 62) {
StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
MachineType::Int64());
- m.Return(
- m.Int64Sub(m.Parameter(0),
- m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ m.Return(m.Int64Sub(
+ m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((int64_t{1} << k) + 1), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
@@ -2842,6 +2844,65 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+static const WriteBarrierKind kWriteBarrierKinds[] = {
+ kMapWriteBarrier, kPointerWriteBarrier, kEphemeronKeyWriteBarrier,
+ kFullWriteBarrier};
+
+const int32_t kStoreWithBarrierImmediates[] = {
+ -256, -255, -3, -2, -1, 0, 1, 2, 3, 255,
+ 256, 264, 4096, 4104, 8192, 8200, 16384, 16392, 32752, 32760};
+
+using InstructionSelectorStoreWithBarrierTest =
+ InstructionSelectorTestWithParam<WriteBarrierKind>;
+
+TEST_P(InstructionSelectorStoreWithBarrierTest,
+ StoreWithWriteBarrierParameters) {
+ const WriteBarrierKind barrier_kind = GetParam();
+ StreamBuilder m(this, MachineType::Int32(),
+ MachineType::TypeCompressedTaggedPointer(),
+ MachineType::Int32(), MachineType::TypeCompressedTagged());
+ m.Store(MachineType::RepCompressedTagged(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), barrier_kind);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllExceptNopInstructions);
+ // We have two instructions that are not nops: Store and Return.
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchStoreWithWriteBarrier, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+TEST_P(InstructionSelectorStoreWithBarrierTest,
+ StoreWithWriteBarrierImmediate) {
+ const WriteBarrierKind barrier_kind = GetParam();
+ TRACED_FOREACH(int32_t, index, kStoreWithBarrierImmediates) {
+ StreamBuilder m(this, MachineType::Int32(),
+ MachineType::TypeCompressedTaggedPointer(),
+ MachineType::TypeCompressedTagged());
+ m.Store(MachineType::RepCompressedTagged(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), barrier_kind);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build(kAllExceptNopInstructions);
+ // We have two instructions that are not nops: Store and Return.
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArchStoreWithWriteBarrier, s[0]->arch_opcode());
+ // With compressed pointers, a store with barrier is a 32-bit str which has
+ // a smaller immediate range.
+ if (COMPRESS_POINTERS_BOOL && (index > 16380)) {
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ } else {
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ }
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorStoreWithBarrierTest,
+ ::testing::ValuesIn(kWriteBarrierKinds));
+
// -----------------------------------------------------------------------------
// Comparison instructions.
@@ -4601,9 +4662,11 @@ namespace {
// Builds a call with the specified signature and nodes as arguments.
// Then checks that the correct number of kArm64Poke and kArm64PokePair were
// generated.
-void TestPokePair(InstructionSelectorTest::StreamBuilder& m, Zone* zone,
- MachineSignature::Builder& builder, Node* nodes[],
- int num_nodes, int expected_poke_pair, int expected_poke) {
+void TestPokePair(
+ InstructionSelectorTest::StreamBuilder& m, // NOLINT(runtime/references)
+ Zone* zone,
+ MachineSignature::Builder& builder, // NOLINT(runtime/references)
+ Node* nodes[], int num_nodes, int expected_poke_pair, int expected_poke) {
auto call_descriptor =
InstructionSelectorTest::StreamBuilder::MakeSimpleCallDescriptor(
zone, builder.Build());
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index ecc1712e3d..a48ad1b359 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -5,6 +5,7 @@
#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
@@ -38,11 +39,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSequence sequence(test_->isolate(), test_->zone(),
instruction_blocks);
SourcePositionTable source_position_table(graph());
+ TickCounter tick_counter;
InstructionSelector selector(
test_->zone(), node_count, &linkage, &sequence, schedule,
&source_position_table, nullptr,
- InstructionSelector::kEnableSwitchJumpTable, source_position_mode,
- features, InstructionSelector::kDisableScheduling,
+ InstructionSelector::kEnableSwitchJumpTable, &tick_counter,
+ source_position_mode, features, InstructionSelector::kDisableScheduling,
InstructionSelector::kEnableRootsRelativeAddressing,
PoisoningMitigationLevel::kPoisonAll);
selector.SelectInstructions();
diff --git a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
index 9fcaedc0f8..34fb84957c 100644
--- a/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/branch-elimination-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/branch-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
@@ -27,7 +28,7 @@ class BranchEliminationTest : public GraphTest {
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
machine());
- GraphReducer graph_reducer(zone(), graph(), jsgraph.Dead());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), jsgraph.Dead());
BranchElimination branch_condition_elimination(&graph_reducer, &jsgraph,
zone());
graph_reducer.AddReducer(&branch_condition_elimination);
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index fa779891ac..d3c81344f2 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -59,8 +59,7 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
Handle<BytecodeArray> bytecode,
const std::vector<std::pair<std::string, std::string>>&
expected_liveness) {
- BytecodeAnalysis analysis(bytecode, zone(), true);
- analysis.Analyze(BailoutId::None());
+ BytecodeAnalysis analysis(bytecode, zone(), BailoutId::None(), true);
interpreter::BytecodeArrayIterator iterator(bytecode);
for (auto liveness : expected_liveness) {
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index c97bb96b49..690701cf56 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -29,7 +29,7 @@ class CommonOperatorReducerTest : public GraphTest {
Reduction Reduce(
AdvancedReducer::Editor* editor, Node* node,
MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
- JSHeapBroker broker(isolate(), zone());
+ JSHeapBroker broker(isolate(), zone(), FLAG_trace_heap_broker);
MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
flags);
CommonOperatorReducer reducer(editor, graph(), &broker, common(), &machine,
diff --git a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
index d30449daa7..135e8e307f 100644
--- a/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/constant-folding-reducer-unittest.cc
@@ -63,7 +63,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
public:
ConstantFoldingReducerTest()
: TypedGraphTest(3),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
simplified_(zone()),
deps_(&broker_, zone()) {}
~ConstantFoldingReducerTest() override = default;
@@ -75,7 +75,7 @@ class ConstantFoldingReducerTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
ConstantFoldingReducer reducer(&graph_reducer, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index 992ddcc55b..bda547b3f0 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -25,7 +25,8 @@ class ControlFlowOptimizerTest : public GraphTest {
protected:
void Optimize() {
- ControlFlowOptimizer optimizer(graph(), common(), machine(), zone());
+ ControlFlowOptimizer optimizer(graph(), common(), machine(), tick_counter(),
+ zone());
optimizer.Optimize();
}
diff --git a/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
index 65bdb4c46e..fa938f76da 100644
--- a/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/decompression-elimination-unittest.cc
@@ -3,11 +3,14 @@
// found in the LICENSE file.
#include "src/compiler/decompression-elimination.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+using testing::_;
using testing::StrictMock;
namespace v8 {
@@ -24,12 +27,69 @@ class DecompressionEliminationTest : public GraphTest {
~DecompressionEliminationTest() override = default;
protected:
- Reduction Reduce(Node* node) {
- StrictMock<MockAdvancedReducerEditor> editor;
- DecompressionElimination decompression_elimination(&editor, graph(),
+ Reduction Reduce(StrictMock<MockAdvancedReducerEditor>* editor, Node* node) {
+ DecompressionElimination decompression_elimination(editor, graph(),
machine(), common());
return decompression_elimination.Reduce(node);
}
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+ Node* GetUniqueValueUse(Node* node) {
+ Node* value_use = nullptr;
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsValueEdge(edge)) {
+ if (value_use) {
+ return nullptr;
+ } else {
+ value_use = edge.from();
+ }
+ }
+ }
+ // Return the value use of node after the reduction, if there is exactly one
+ return value_use;
+ }
+
+ const Operator* DecompressionOpFromAccess(const ElementAccess access) {
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kCompressed:
+ return machine()->ChangeCompressedToTagged();
+ case MachineRepresentation::kCompressedSigned:
+ return machine()->ChangeCompressedSignedToTaggedSigned();
+ case MachineRepresentation::kCompressedPointer:
+ return machine()->ChangeCompressedPointerToTaggedPointer();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ const Operator* CompressionOpFromAccess(const ElementAccess access) {
+ switch (access.machine_type.representation()) {
+ case MachineRepresentation::kCompressed:
+ return machine()->ChangeTaggedToCompressed();
+ case MachineRepresentation::kCompressedSigned:
+ return machine()->ChangeTaggedSignedToCompressedSigned();
+ case MachineRepresentation::kCompressedPointer:
+ return machine()->ChangeTaggedPointerToCompressedPointer();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // 'Global' accesses used to simplify the tests.
+ ElementAccess const any_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::AnyCompressed(),
+ kNoWriteBarrier};
+ ElementAccess const signed_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedSigned(),
+ kNoWriteBarrier};
+ ElementAccess const pointer_access = {kTaggedBase, kTaggedSize, Type::Any(),
+ MachineType::CompressedPointer(),
+ kNoWriteBarrier};
+ const ElementAccess element_accesses[3] = {any_access, signed_access,
+ pointer_access};
+
MachineOperatorBuilder* machine() { return &machine_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
@@ -39,222 +99,90 @@ class DecompressionEliminationTest : public GraphTest {
};
// -----------------------------------------------------------------------------
-// Direct Decompression & Compression
+// Direct Decompression & Compression.
TEST_F(DecompressionEliminationTest, BasicDecompressionCompression) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedToTagged(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionSigned) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
- }
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedSigned(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
- Node* changeToCompressed = graph()->NewNode(
- machine()->ChangeTaggedSignedToCompressedSigned(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
+ // Pairs of <load, store> accesses
+ const std::pair<ElementAccess, ElementAccess> accesses[] = {
+ {any_access, any_access}, {signed_access, any_access},
+ {pointer_access, any_access}, {any_access, signed_access},
+ {signed_access, signed_access}, {any_access, pointer_access},
+ {pointer_access, pointer_access}};
-TEST_F(DecompressionEliminationTest, BasicDecompressionCompressionPointer) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
+ for (size_t i = 0; i < arraysize(accesses); ++i) {
+ // Create the graph.
+ Node* load = graph()->NewNode(simplified()->LoadElement(accesses[i].first),
+ object, index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(accesses[i].first), load);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(accesses[i].second), change_to_tagged);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(accesses[i].second), object,
+ index, change_to_compressed, effect, control);
+
+ // Reduce.
+ Reduction r = Reduce(change_to_compressed);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
}
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(access), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed = graph()->NewNode(
- machine()->ChangeTaggedPointerToCompressedPointer(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(access), object, index,
- changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
}
// -----------------------------------------------------------------------------
-// Direct Decompression & Compression - border cases
-
-// For example, if we are lowering a CheckedCompressedToTaggedPointer in the
-// effect linearization phase we will change that to
-// ChangeCompressedPointerToTaggedPointer. Then, we might end up with a chain of
-// Parent <- ChangeCompressedPointerToTaggedPointer <- ChangeTaggedToCompressed
-// <- Child.
-// Similarly, we have cases with Signed instead of pointer.
-// The following border case tests will test that the functionality is robust
-// enough to handle that.
-
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCaseSigned) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
- }
+// Direct Compression & Decompression
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged =
- graph()->NewNode(machine()->ChangeCompressedSignedToTaggedSigned(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCasePointer) {
- // Skip test if pointer compression is not enabled
+TEST_F(DecompressionEliminationTest, BasicCompressionDecompression) {
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(),
- kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
-}
-// We also have cases of ChangeCompressedToTagged <-
-// ChangeTaggedPointerToCompressedPointer, where the
-// ChangeTaggedPointerToCompressedPointer was introduced while lowering a
-// NewConsString on effect control linearizer
+ // Pairs of <load, store> accesses
+ const std::pair<ElementAccess, ElementAccess> accesses[] = {
+ {any_access, any_access}, {signed_access, any_access},
+ {pointer_access, any_access}, {any_access, signed_access},
+ {signed_access, signed_access}, {any_access, pointer_access},
+ {pointer_access, pointer_access}};
-TEST_F(DecompressionEliminationTest,
- BasicDecompressionCompressionBorderCasePointerDecompression) {
- // Skip test if pointer compression is not enabled
- if (!COMPRESS_POINTERS_BOOL) {
- return;
+ for (size_t i = 0; i < arraysize(accesses); ++i) {
+ // Create the graph.
+ Node* load = graph()->NewNode(simplified()->LoadElement(accesses[i].first),
+ object, index, effect, control);
+ Node* change_to_compressed =
+ graph()->NewNode(CompressionOpFromAccess(accesses[i].first), load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(accesses[i].second), change_to_compressed);
+ effect = graph()->NewNode(simplified()->StoreElement(accesses[i].second),
+ object, index, change_to_tagged, effect, control);
+
+ // Reduce.
+ Reduction r = Reduce(change_to_tagged);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(load, r.replacement());
}
-
- // Define variables
- Node* const control = graph()->start();
- Node* object = Parameter(Type::Any(), 0);
- Node* effect = graph()->start();
- Node* index = Parameter(Type::UnsignedSmall(), 1);
- ElementAccess const loadAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::TaggedPointer(),
- kNoWriteBarrier};
- ElementAccess const storeAccess = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyTagged(), kNoWriteBarrier};
-
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(loadAccess), object,
- index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* changeToCompressed =
- graph()->NewNode(machine()->ChangeTaggedToCompressed(), changeToTagged);
- effect = graph()->NewNode(simplified()->StoreElement(storeAccess), object,
- index, changeToCompressed, effect, control);
-
- // Reduce
- Reduction r = Reduce(changeToCompressed);
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(load, r.replacement());
}
// -----------------------------------------------------------------------------
-// Compress after constant
+// Compress after constant.
-TEST_F(DecompressionEliminationTest,
- DecompressionConstantStoreElementInt64Constant) {
+TEST_F(DecompressionEliminationTest, CompressionAfterInt64Constant) {
// Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
@@ -266,21 +194,6 @@ TEST_F(DecompressionEliminationTest,
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const Operator* compression_ops[] = {
- machine()->ChangeTaggedToCompressed(),
- machine()->ChangeTaggedSignedToCompressedSigned(),
- machine()->ChangeTaggedPointerToCompressedPointer()};
-
- ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
-
const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
static_cast<int64_t>(0x0000FFFFFFFF0000),
@@ -291,31 +204,26 @@ TEST_F(DecompressionEliminationTest,
static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
- // For every compression.
- for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every Int64Constant.
for (size_t j = 0; j < arraysize(constants); ++j) {
// Create the graph.
Node* constant = graph()->NewNode(common()->Int64Constant(constants[j]));
- Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
- effect =
- graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
- object, index, changeToCompressed, effect, control);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(element_accesses[i]), constant);
+ effect = graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, change_to_compressed, effect,
+ control);
// Reduce.
- Reduction r = Reduce(changeToCompressed);
+ Reduction r = Reduce(change_to_compressed);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kInt32Constant);
}
}
}
-TEST_F(DecompressionEliminationTest,
- DecompressionConstantStoreElementHeapConstant) {
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
- if (COMPRESS_POINTERS_BOOL) {
- return;
- }
+TEST_F(DecompressionEliminationTest, CompressionAfterHeapConstant) {
// Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
@@ -327,21 +235,6 @@ TEST_F(DecompressionEliminationTest,
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const Operator* compression_ops[] = {
- machine()->ChangeTaggedToCompressed(),
- machine()->ChangeTaggedSignedToCompressedSigned(),
- machine()->ChangeTaggedPointerToCompressedPointer()};
-
- ASSERT_EQ(arraysize(compression_ops), arraysize(element_accesses));
-
const Handle<HeapNumber> heap_constants[] = {
factory()->NewHeapNumber(0.0),
factory()->NewHeapNumber(-0.0),
@@ -359,119 +252,83 @@ TEST_F(DecompressionEliminationTest,
factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
- // For every compression.
- for (size_t i = 0; i < arraysize(compression_ops); ++i) {
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every HeapNumber.
for (size_t j = 0; j < arraysize(heap_constants); ++j) {
// Create the graph.
Node* constant =
graph()->NewNode(common()->HeapConstant(heap_constants[j]));
- Node* changeToCompressed = graph()->NewNode(compression_ops[i], constant);
- effect =
- graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
- object, index, changeToCompressed, effect, control);
+ Node* change_to_compressed = graph()->NewNode(
+ CompressionOpFromAccess(element_accesses[i]), constant);
+ effect = graph()->NewNode(simplified()->StoreElement(element_accesses[i]),
+ object, index, change_to_compressed, effect,
+ control);
// Reduce.
- Reduction r = Reduce(changeToCompressed);
+ Reduction r = Reduce(change_to_compressed);
ASSERT_TRUE(r.Changed());
- // TODO(v8:8977): Change the IrOpcode here to kCompressedHeapConstant when
- // that is in place.
- EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kHeapConstant);
+ EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kCompressedHeapConstant);
}
}
}
// -----------------------------------------------------------------------------
-// Phi
+// Phi.
TEST_F(DecompressionEliminationTest, PhiOneDecompress) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 1;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged = graph()->NewNode(decompression_ops[i], load);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
change_to_tagged, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(DecompressionOpFromAccess(element_accesses[i]), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeDecompressSameRepresentation) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load1 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
@@ -481,115 +338,98 @@ TEST_F(DecompressionEliminationTest, PhiThreeDecompressSameRepresentation) {
Node* load3 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
- Node* change_to_tagged3 = graph()->NewNode(decompression_ops[i], load3);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* change_to_tagged_3 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load3);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+ change_to_tagged_1, change_to_tagged_2, change_to_tagged_3, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(opcodes[i], r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(DecompressionOpFromAccess(element_accesses[i]), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeDecompressOneAnyRepresentation) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
+ // Signed and Pointer (and not Any) accesses.
+ const ElementAccess not_any_accesses[] = {signed_access, pointer_access};
- const ElementAccess any_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::AnyCompressed(),
- kNoWriteBarrier};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
-
- // For every access
- for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // For every access.
+ for (size_t i = 0; i < arraysize(not_any_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(not_any_accesses[i]), object,
index, effect, control);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(not_any_accesses[i]), object,
index, effect, control);
- // Note that load3 loads a CompressedAny instead of element_accesses[i]
+ // Note that load3 loads a CompressedAny instead of not_any_accesses[i]
Node* load3 = graph()->NewNode(simplified()->LoadElement(any_access),
object, index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
- Node* change_to_tagged3 =
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(not_any_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(not_any_accesses[i]), load2);
+ Node* change_to_tagged_3 =
graph()->NewNode(machine()->ChangeCompressedToTagged(), load3);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, change_to_tagged3, control);
+ change_to_tagged_1, change_to_tagged_2, change_to_tagged_3, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(machine()->ChangeCompressedToTagged(), decompress->op());
}
}
TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 3;
- const Operator* decompression_ops[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess element_accesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyCompressed(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::CompressedPointer(),
- kNoWriteBarrier}};
-
- const IrOpcode::Value opcodes[] = {
- IrOpcode::kChangeCompressedToTagged,
- IrOpcode::kChangeCompressedSignedToTaggedSigned,
- IrOpcode::kChangeCompressedPointerToTaggedPointer};
-
- ASSERT_EQ(arraysize(decompression_ops), arraysize(element_accesses));
- ASSERT_EQ(arraysize(opcodes), arraysize(element_accesses));
-
- // For every access
+ // For every access.
for (size_t i = 0; i < arraysize(element_accesses); ++i) {
- // Create the graph
+ // Create the graph.
Node* load1 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
@@ -599,14 +439,16 @@ TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
Node* load3 =
graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(decompression_ops[i], load1);
- Node* change_to_tagged2 = graph()->NewNode(decompression_ops[i], load2);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, load3, control);
+ change_to_tagged_1, change_to_tagged_2, load3, control);
- // Reduce
+ // Reduce.
Reduction r = Reduce(phi);
ASSERT_FALSE(r.Changed());
}
@@ -615,287 +457,251 @@ TEST_F(DecompressionEliminationTest, PhiThreeInputsOneNotDecompressed) {
// In the case of having one decompress Signed and one Pointer, we have to
// generate the conservative decompress any after the Phi.
TEST_F(DecompressionEliminationTest, PhiTwoDecompressesOneSignedOnePointer) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
const int number_of_inputs = 2;
- const ElementAccess signed_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::CompressedSigned(),
- kNoWriteBarrier};
- const ElementAccess pointer_access = {kTaggedBase, kTaggedSize, Type::Any(),
- MachineType::CompressedPointer(),
- kNoWriteBarrier};
- // Create the graph
+ // Create the graph.
Node* load1 = graph()->NewNode(simplified()->LoadElement(signed_access),
object, index, effect, control);
Node* load2 = graph()->NewNode(simplified()->LoadElement(pointer_access),
object, index, effect, control);
- Node* change_to_tagged1 = graph()->NewNode(
- machine()->ChangeCompressedSignedToTaggedSigned(), load1);
- Node* change_to_tagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(signed_access), load1);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(pointer_access), load2);
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, number_of_inputs),
- change_to_tagged1, change_to_tagged2, control);
+ change_to_tagged_1, change_to_tagged_2, control);
- // Reduce
- Reduction r = Reduce(phi);
+ // Reduce.
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, ReplaceWithValue(phi, _, _, _));
+ Reduction r = Reduce(&editor, phi);
ASSERT_TRUE(r.Changed());
- EXPECT_EQ(IrOpcode::kChangeCompressedToTagged, r.replacement()->opcode());
+
+ // Get the actual decompress after the Phi, and check against the expected
+ // one.
+ Node* decompress = GetUniqueValueUse(phi);
+ EXPECT_EQ(machine()->ChangeCompressedToTagged(), decompress->op());
}
// -----------------------------------------------------------------------------
-// TypedStateValues
+// TypedStateValues.
TEST_F(DecompressionEliminationTest, TypedStateValuesOneDecompress) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 1;
+ const int number_of_inputs = 1;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
- object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load);
- Node* typedStateValuesOneDecompress = graph()->NewNode(
- common()->TypedStateValues(types, dense), changeToTagged);
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
+ Node* typedStateValues = graph()->NewNode(
+ common()->TypedStateValues(types, dense), change_to_tagged);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesTwoDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
- Node* typedStateValuesOneDecompress =
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* typedStateValues =
graph()->NewNode(common()->TypedStateValues(types, dense),
- changeToTagged1, load1, changeToTagged2);
+ change_to_tagged_1, load1, change_to_tagged_2);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load1);
+ // Note that the input at index 1 didn't change.
+ EXPECT_EQ(r.replacement()->InputAt(1), load1);
+ EXPECT_EQ(r.replacement()->InputAt(2), load2);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesAllDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load1);
+ Node* change_to_tagged_1 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load2);
+ Node* change_to_tagged_2 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load2);
Node* load3 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]), object,
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
index, effect, control);
- Node* changeToTagged3 = graph()->NewNode(
- machine()->ChangeCompressedPointerToTaggedPointer(), load3);
- Node* typedStateValuesOneDecompress =
- graph()->NewNode(common()->TypedStateValues(types, dense),
- changeToTagged1, changeToTagged2, changeToTagged3);
+ Node* change_to_tagged_3 =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load3);
+ Node* typedStateValues = graph()->NewNode(
+ common()->TypedStateValues(types, dense), change_to_tagged_1,
+ change_to_tagged_2, change_to_tagged_3);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(r.replacement()->InputAt(0), load1);
+ EXPECT_EQ(r.replacement()->InputAt(1), load2);
+ EXPECT_EQ(r.replacement()->InputAt(2), load3);
}
}
TEST_F(DecompressionEliminationTest, TypedStateValuesNoDecompresses) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const int numberOfInputs = 3;
+ const int number_of_inputs = 3;
const ZoneVector<MachineType>* types =
new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
- ZoneVector<MachineType>(numberOfInputs, graph()->zone());
+ ZoneVector<MachineType>(number_of_inputs, graph()->zone());
SparseInputMask dense = SparseInputMask::Dense();
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- // For every access
- for (size_t i = 0; i < arraysize(ElementAccesses); ++i) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[i]),
- object, index, effect, control);
- Node* typedStateValuesOneDecompress = graph()->NewNode(
+
+ // For every access.
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* typedStateValues = graph()->NewNode(
common()->TypedStateValues(types, dense), load, load, load);
- // Reduce
+ // Reduce.
StrictMock<MockAdvancedReducerEditor> editor;
DecompressionElimination decompression_elimination(&editor, graph(),
machine(), common());
- Reduction r =
- decompression_elimination.Reduce(typedStateValuesOneDecompress);
+ Reduction r = decompression_elimination.Reduce(typedStateValues);
ASSERT_FALSE(r.Changed());
}
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of two decompressions
+// Word64Equal comparison of two decompressions.
TEST_F(DecompressionEliminationTest, TwoDecompressionWord64Equal) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
// For every decompression (rhs)
- for (size_t k = 0; k < arraysize(DecompressionOps); ++k) {
- // Create the graph
+ for (size_t j = 0; j < arraysize(element_accesses); ++j) {
+ // Create the graph.
Node* load1 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged1 = graph()->NewNode(DecompressionOps[j], load1);
+ Node* change_to_tagged_1 = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load1);
Node* load2 =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[k]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[j]),
object, index, effect, control);
- Node* changeToTagged2 = graph()->NewNode(DecompressionOps[j], load2);
- Node* comparison = graph()->NewNode(machine()->Word64Equal(),
- changeToTagged1, changeToTagged2);
- // Reduce
+ Node* change_to_tagged_2 = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load2);
+ Node* comparison = graph()->NewNode(
+ machine()->Word64Equal(), change_to_tagged_1, change_to_tagged_2);
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -904,44 +710,31 @@ TEST_F(DecompressionEliminationTest, TwoDecompressionWord64Equal) {
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of two decompressions, where lhs == rhs
+// Word64Equal comparison of two decompressions, where lhs == rhs.
TEST_F(DecompressionEliminationTest, TwoDecompressionWord64EqualSameInput) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- // For every decompression (same for lhs and rhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // Create the graph
- Node* load = graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
- object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+  // For every access (same for lhs and rhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // Create the graph.
+ Node* load =
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]), object,
+ index, effect, control);
+ Node* change_to_tagged =
+ graph()->NewNode(DecompressionOpFromAccess(element_accesses[i]), load);
Node* comparison = graph()->NewNode(machine()->Word64Equal(),
- changeToTagged, changeToTagged);
- // Reduce
+ change_to_tagged, change_to_tagged);
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -949,35 +742,20 @@ TEST_F(DecompressionEliminationTest, TwoDecompressionWord64EqualSameInput) {
}
// -----------------------------------------------------------------------------
-// Word64Equal comparison of decompress and a constant
+// Word64Equal comparison of decompress and a constant.
TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
const int64_t constants[] = {static_cast<int64_t>(0x0000000000000000),
static_cast<int64_t>(0x0000000000000001),
static_cast<int64_t>(0x0000FFFFFFFF0000),
@@ -988,24 +766,25 @@ TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // For every constant (rhs)
- for (size_t k = 0; k < arraysize(constants); ++k) {
- // Test with both (lhs, rhs) combinations
- for (bool lhsIsDecompression : {false, true}) {
- // Create the graph
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // For every constant (rhs).
+ for (size_t j = 0; j < arraysize(constants); ++j) {
+ // Test with both (lhs, rhs) combinations.
+ for (bool lhs_is_decompression : {false, true}) {
+ // Create the graph.
Node* load =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load);
Node* constant =
- graph()->NewNode(common()->Int64Constant(constants[k]));
+ graph()->NewNode(common()->Int64Constant(constants[j]));
- Node* lhs = lhsIsDecompression ? changeToTagged : constant;
- Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* lhs = lhs_is_decompression ? change_to_tagged : constant;
+ Node* rhs = lhs_is_decompression ? constant : change_to_tagged;
Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
- // Reduce
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
@@ -1015,38 +794,18 @@ TEST_F(DecompressionEliminationTest, DecompressionConstantWord64Equal) {
}
TEST_F(DecompressionEliminationTest, DecompressionHeapConstantWord64Equal) {
- // TODO(v8:8977): Disabling HeapConstant until CompressedHeapConstant
- // exists, since it breaks with verify CSA on.
- if (COMPRESS_POINTERS_BOOL) {
- return;
- }
- // Skip test if pointer compression is not enabled
+ // Skip test if pointer compression is not enabled.
if (!COMPRESS_POINTERS_BOOL) {
return;
}
- // Define variables
+ // Define variables.
Node* const control = graph()->start();
Node* object = Parameter(Type::Any(), 0);
Node* effect = graph()->start();
Node* index = Parameter(Type::UnsignedSmall(), 1);
- const Operator* DecompressionOps[] = {
- machine()->ChangeCompressedToTagged(),
- machine()->ChangeCompressedSignedToTaggedSigned(),
- machine()->ChangeCompressedPointerToTaggedPointer()};
-
- const ElementAccess ElementAccesses[] = {
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::AnyTagged(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedSigned(),
- kNoWriteBarrier},
- {kTaggedBase, kTaggedSize, Type::Any(), MachineType::TaggedPointer(),
- kNoWriteBarrier}};
-
- ASSERT_EQ(arraysize(DecompressionOps), arraysize(ElementAccesses));
-
- const Handle<HeapNumber> heapConstants[] = {
+ const Handle<HeapNumber> heap_constants[] = {
factory()->NewHeapNumber(0.0),
factory()->NewHeapNumber(-0.0),
factory()->NewHeapNumber(11.2),
@@ -1063,24 +822,25 @@ TEST_F(DecompressionEliminationTest, DecompressionHeapConstantWord64Equal) {
factory()->NewHeapNumber(0x8FFFFFFFFFFFFFFF),
factory()->NewHeapNumber(0xFFFFFFFFFFFFFFFF)};
- // For every decompression (lhs)
- for (size_t j = 0; j < arraysize(DecompressionOps); ++j) {
- // For every constant (rhs)
- for (size_t k = 0; k < arraysize(heapConstants); ++k) {
- // Test with both (lhs, rhs) combinations
- for (bool lhsIsDecompression : {false, true}) {
- // Create the graph
+ // For every decompression (lhs).
+ for (size_t i = 0; i < arraysize(element_accesses); ++i) {
+ // For every constant (rhs).
+ for (size_t j = 0; j < arraysize(heap_constants); ++j) {
+ // Test with both (lhs, rhs) combinations.
+ for (bool lhs_is_decompression : {false, true}) {
+ // Create the graph.
Node* load =
- graph()->NewNode(simplified()->LoadElement(ElementAccesses[j]),
+ graph()->NewNode(simplified()->LoadElement(element_accesses[i]),
object, index, effect, control);
- Node* changeToTagged = graph()->NewNode(DecompressionOps[j], load);
+ Node* change_to_tagged = graph()->NewNode(
+ DecompressionOpFromAccess(element_accesses[i]), load);
Node* constant =
- graph()->NewNode(common()->HeapConstant(heapConstants[k]));
+ graph()->NewNode(common()->HeapConstant(heap_constants[j]));
- Node* lhs = lhsIsDecompression ? changeToTagged : constant;
- Node* rhs = lhsIsDecompression ? constant : changeToTagged;
+ Node* lhs = lhs_is_decompression ? change_to_tagged : constant;
+ Node* rhs = lhs_is_decompression ? constant : change_to_tagged;
Node* comparison = graph()->NewNode(machine()->Word64Equal(), lhs, rhs);
- // Reduce
+ // Reduce.
Reduction r = Reduce(comparison);
ASSERT_TRUE(r.Changed());
EXPECT_EQ(r.replacement()->opcode(), IrOpcode::kWord32Equal);
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index f506502610..1f952cc1a6 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/test-utils.h"
using testing::_;
@@ -237,9 +238,11 @@ class AdvancedReducerTest : public TestWithZone {
protected:
Graph* graph() { return &graph_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
Graph graph_;
+ TickCounter tick_counter_;
};
@@ -368,7 +371,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse2) {
Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
- GraphReducer graph_reducer(zone(), graph(), dead);
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), dead);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
EXPECT_EQ(start, use_control->InputAt(0));
@@ -392,7 +395,7 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse3) {
Node* exception = graph()->NewNode(common.IfException(), effect, node);
Node* use_control = graph()->NewNode(common.Merge(1), success);
Node* replacement = graph()->NewNode(&kMockOperator);
- GraphReducer graph_reducer(zone(), graph(), dead);
+ GraphReducer graph_reducer(zone(), graph(), tick_counter(), dead);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
EXPECT_EQ(start, use_control->InputAt(0));
@@ -422,20 +425,20 @@ class GraphReducerTest : public TestWithZone {
protected:
void ReduceNode(Node* node, Reducer* r) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.AddReducer(r3);
@@ -443,20 +446,20 @@ class GraphReducerTest : public TestWithZone {
}
void ReduceGraph(Reducer* r1) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.ReduceGraph();
}
void ReduceGraph(Reducer* r1, Reducer* r2) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.ReduceGraph();
}
void ReduceGraph(Reducer* r1, Reducer* r2, Reducer* r3) {
- GraphReducer reducer(zone(), graph());
+ GraphReducer reducer(zone(), graph(), tick_counter());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.AddReducer(r3);
@@ -464,9 +467,11 @@ class GraphReducerTest : public TestWithZone {
}
Graph* graph() { return &graph_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
Graph graph_;
+ TickCounter tick_counter_;
};
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index f433dda42e..485df8e401 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -18,7 +18,7 @@ GraphTest::GraphTest(int num_parameters)
: canonical_(isolate()),
common_(zone()),
graph_(zone()),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
source_positions_(&graph_),
node_origins_(&graph_) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
@@ -116,7 +116,8 @@ Matcher<Node*> GraphTest::IsUndefinedConstant() {
}
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters), typer_(broker(), Typer::kNoFlags, graph()) {}
+ : GraphTest(num_parameters),
+ typer_(broker(), Typer::kNoFlags, graph(), tick_counter()) {}
TypedGraphTest::~TypedGraphTest() = default;
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index fa42294a65..60d425b911 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -5,9 +5,11 @@
#ifndef V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
#define V8_UNITTESTS_COMPILER_GRAPH_UNITTEST_H_
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/typer.h"
#include "src/handles/handles.h"
@@ -62,6 +64,7 @@ class GraphTest : public TestWithNativeContextAndZone {
SourcePositionTable* source_positions() { return &source_positions_; }
NodeOriginTable* node_origins() { return &node_origins_; }
JSHeapBroker* broker() { return &broker_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
CanonicalHandleScope canonical_;
@@ -70,6 +73,7 @@ class GraphTest : public TestWithNativeContextAndZone {
JSHeapBroker broker_;
SourcePositionTable source_positions_;
NodeOriginTable node_origins_;
+ TickCounter tick_counter_;
};
diff --git a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
index b9f3ff8056..3d4e16ac68 100644
--- a/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-call-reducer-unittest.cc
@@ -4,6 +4,7 @@
#include <cctype>
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-call-reducer.h"
#include "src/compiler/js-graph.h"
@@ -33,7 +34,7 @@ class JSCallReducerTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSCallReducer reducer(&graph_reducer, &jsgraph, broker(),
JSCallReducer::kNoFlags, &deps_);
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
index 5a0d54e861..95c03e543f 100644
--- a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-create-lowering.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -43,7 +44,7 @@ class JSCreateLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph, broker(),
zone());
return reducer.Reduce(node);
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index d6e9876e64..3510cd4b74 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -36,7 +36,7 @@ class JSIntrinsicLoweringTest : public GraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSIntrinsicLowering reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 765a79db40..0d7bb946e3 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -47,7 +47,7 @@ class JSTypedLoweringTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
JSTypedLowering reducer(&graph_reducer, &jsgraph, broker(), zone());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index 07013aa52c..e084b495f9 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -66,7 +66,7 @@ class LoopPeelingTest : public GraphTest {
StdoutStream{} << AsRPO(*graph());
}
Zone zone(isolate()->allocator(), ZONE_NAME);
- return LoopFinder::BuildLoopTree(graph(), &zone);
+ return LoopFinder::BuildLoopTree(graph(), tick_counter(), &zone);
}
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index e85bc09e1e..7f3a613994 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -33,7 +33,7 @@ class MachineOperatorReducerTest : public GraphTest {
javascript_(zone()),
jsgraph_(isolate(), graph(), &common_, &javascript_, nullptr,
&machine_),
- graph_reducer_(zone(), graph(), jsgraph_.Dead()) {}
+ graph_reducer_(zone(), graph(), tick_counter(), jsgraph_.Dead()) {}
protected:
Reduction Reduce(Node* node) {
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 8ffdaf27d0..fc6f1d5500 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1095,9 +1095,11 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
if (NodeProperties::FirstControlIndex(node) < node->InputCount()) { \
control_node = NodeProperties::GetControlInput(node); \
} \
+ LoadRepresentation rep = IrOpcode::kLoadFromObject == node->opcode() \
+ ? ObjectAccessOf(node->op()).machine_type \
+ : LoadRepresentationOf(node->op()); \
return (TestNodeMatcher::MatchAndExplain(node, listener) && \
- PrintMatchAndExplain(LoadRepresentationOf(node->op()), "rep", \
- rep_matcher_, listener) && \
+ PrintMatchAndExplain(rep, "rep", rep_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), \
"base", base_matcher_, listener) && \
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), \
@@ -1119,6 +1121,7 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
LOAD_MATCHER(Load)
LOAD_MATCHER(UnalignedLoad)
LOAD_MATCHER(PoisonedLoad)
+LOAD_MATCHER(LoadFromObject)
#define STORE_MATCHER(kStore) \
class Is##kStore##Matcher final : public TestNodeMatcher { \
@@ -2037,6 +2040,16 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
control_matcher));
}
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsLoadFromObjectMatcher(rep_matcher, base_matcher,
+ index_matcher, effect_matcher,
+ control_matcher));
+}
+
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index a71f05964f..be8d67cb35 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -333,6 +333,11 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
diff --git a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
index 067b7c95ec..76fbc4a368 100644
--- a/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
+++ b/deps/v8/test/unittests/compiler/redundancy-elimination-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/redundancy-elimination.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/common-operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -299,6 +300,36 @@ TEST_F(RedundancyEliminationTest, CheckedFloat64ToInt64) {
}
// -----------------------------------------------------------------------------
+// CheckedInt32ToCompressedSigned
+
+TEST_F(RedundancyEliminationTest, CheckedInt32ToCompressedSigned) {
+ if (!COMPRESS_POINTERS_BOOL) {
+ return;
+ }
+ TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
+ TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
+ Node* value = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+
+ Node* check1 = effect = graph()->NewNode(
+ simplified()->CheckedInt32ToCompressedSigned(feedback1), value,
+ effect, control);
+ Reduction r1 = Reduce(check1);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(r1.replacement(), check1);
+
+ Node* check2 = effect = graph()->NewNode(
+ simplified()->CheckedInt32ToCompressedSigned(feedback2), value,
+ effect, control);
+ Reduction r2 = Reduce(check2);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(r2.replacement(), check1);
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
// CheckedInt32ToTaggedSigned
TEST_F(RedundancyEliminationTest, CheckedInt32ToTaggedSigned) {
@@ -938,7 +969,7 @@ TEST_F(RedundancyEliminationTest, CheckedUint64ToTaggedSigned) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -974,7 +1005,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberEqualWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1013,7 +1044,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1049,7 +1080,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1088,7 +1119,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1124,7 +1155,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberLessThanOrEqualWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
Node* lhs = Parameter(Type::UnsignedSmall(), 0);
@@ -1163,7 +1194,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberAddWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1189,7 +1220,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1219,7 +1250,7 @@ TEST_F(RedundancyEliminationTest, SpeculativeNumberAddWithCheckBoundsSameType) {
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1247,7 +1278,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeNumberSubtractWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1278,7 +1309,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1306,7 +1337,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerAddWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1337,7 +1368,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Any(), 0);
@@ -1365,7 +1396,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeSafeIntegerSubtractWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
Node* lhs = Parameter(Type::Range(42.0, 42.0, zone()), 0);
@@ -1396,7 +1427,7 @@ TEST_F(RedundancyEliminationTest,
TEST_F(RedundancyEliminationTest,
SpeculativeToNumberWithCheckBoundsBetterType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
@@ -1424,7 +1455,7 @@ TEST_F(RedundancyEliminationTest,
}
TEST_F(RedundancyEliminationTest, SpeculativeToNumberWithCheckBoundsSameType) {
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
TRACED_FOREACH(VectorSlotPair, feedback1, vector_slot_pairs()) {
TRACED_FOREACH(VectorSlotPair, feedback2, vector_slot_pairs()) {
TRACED_FOREACH(NumberOperationHint, hint, kNumberOperationHints) {
diff --git a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
index a71a5315f1..82c55d9cf8 100644
--- a/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
+++ b/deps/v8/test/unittests/compiler/regalloc/live-range-unittest.cc
@@ -56,13 +56,13 @@ class TestRangeBuilder {
LifetimePosition start = LifetimePosition::FromInt(pair.first);
LifetimePosition end = LifetimePosition::FromInt(pair.second);
CHECK(start < end);
- range->AddUseInterval(start, end, zone_);
+ range->AddUseInterval(start, end, zone_, FLAG_trace_turbo_alloc);
}
for (int pos : uses_) {
UsePosition* use_position =
new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
nullptr, UsePositionHintType::kNone);
- range->AddUsePosition(use_position);
+ range->AddUsePosition(use_position, FLAG_trace_turbo_alloc);
}
pairs_.clear();
@@ -129,10 +129,10 @@ TEST_F(LiveRangeUnitTest, InvalidConstruction) {
// Build a range manually, because the builder guards against empty cases.
TopLevelLiveRange* range =
new (zone()) TopLevelLiveRange(1, MachineRepresentation::kTagged);
- V8_ASSERT_DEBUG_DEATH(
- range->AddUseInterval(LifetimePosition::FromInt(0),
- LifetimePosition::FromInt(0), zone()),
- ".*");
+ V8_ASSERT_DEBUG_DEATH(range->AddUseInterval(LifetimePosition::FromInt(0),
+ LifetimePosition::FromInt(0),
+ zone(), FLAG_trace_turbo_alloc),
+ ".*");
}
TEST_F(LiveRangeUnitTest, SplitInvalidStart) {
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index e153738544..ee1c7997b3 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/scheduler.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -38,8 +39,8 @@ class SchedulerTest : public TestWithIsolateAndZone {
StdoutStream{} << AsJSON(*graph(), &table, &table2);
}
- Schedule* schedule =
- Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kSplitNodes);
+ Schedule* schedule = Scheduler::ComputeSchedule(
+ zone(), graph(), Scheduler::kSplitNodes, tick_counter());
if (FLAG_trace_turbo_scheduler) {
StdoutStream{} << *schedule << std::endl;
@@ -62,8 +63,10 @@ class SchedulerTest : public TestWithIsolateAndZone {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
JSOperatorBuilder* js() { return &js_; }
+ TickCounter* tick_counter() { return &tick_counter_; }
private:
+ TickCounter tick_counter_;
Graph graph_;
CommonOperatorBuilder common_;
SimplifiedOperatorBuilder simplified_;
@@ -88,7 +91,8 @@ const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
TEST_F(SchedulerTest, BuildScheduleEmpty) {
graph()->SetStart(graph()->NewNode(common()->Start(0)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags,
+ tick_counter()));
}
@@ -102,7 +106,8 @@ TEST_F(SchedulerTest, BuildScheduleOneParameter) {
graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags,
+ tick_counter()));
}
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 722384da5b..8a5a9eda91 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -4,6 +4,7 @@
#include "src/compiler/simplified-lowering.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/simplified-operator.h"
@@ -42,13 +43,13 @@ class SimplifiedLoweringTest : public GraphTest {
{
// Simplified lowering needs to run w/o the typer decorator so make sure
// the object is not live at the same time.
- Typer typer(broker(), Typer::kNoFlags, graph());
+ Typer typer(broker(), Typer::kNoFlags, graph(), tick_counter());
typer.Run();
}
- SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
- node_origins(),
- PoisoningMitigationLevel::kDontPoison);
+ SimplifiedLowering lowering(
+ jsgraph(), broker(), zone(), source_positions(), node_origins(),
+ PoisoningMitigationLevel::kDontPoison, tick_counter());
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 1f44eb088b..b198592ddd 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/simplified-operator-reducer.h"
+#include "src/codegen/tick-counter.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -29,12 +30,12 @@ class SimplifiedOperatorReducerTest : public GraphTest {
protected:
Reduction Reduce(Node* node) {
- JSHeapBroker broker(isolate(), zone());
+ JSHeapBroker broker(isolate(), zone(), FLAG_trace_heap_broker);
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph, &broker);
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 1b416628fc..9939c70c75 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -53,6 +53,7 @@ const PureOperator kPureOperators[] = {
PURE(NumberShiftRightLogical, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
+ PURE(ChangeCompressedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
diff --git a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
index 70c0b69047..fb40386378 100644
--- a/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typed-optimization-unittest.cc
@@ -37,7 +37,7 @@ class TypedOptimizationTest : public TypedGraphTest {
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
// TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
+ GraphReducer graph_reducer(zone(), graph(), tick_counter());
TypedOptimization reducer(&graph_reducer, &deps_, &jsgraph, broker());
return reducer.Reduce(node);
}
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 2eaa379f30..ec68993213 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -22,7 +22,7 @@ class TyperTest : public TypedGraphTest {
public:
TyperTest()
: TypedGraphTest(3),
- broker_(isolate(), zone()),
+ broker_(isolate(), zone(), FLAG_trace_heap_broker),
operation_typer_(&broker_, zone()),
types_(zone(), isolate(), random_number_generator()),
javascript_(zone()),
diff --git a/deps/v8/test/unittests/heap/heap-controller-unittest.cc b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
index 445c49052c..db330608f2 100644
--- a/deps/v8/test/unittests/heap/heap-controller-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-controller-unittest.cc
@@ -53,15 +53,14 @@ TEST_F(MemoryControllerTest, HeapGrowingFactor) {
}
TEST_F(MemoryControllerTest, MaxHeapGrowingFactor) {
- CheckEqualRounded(1.3,
- V8Controller::MaxGrowingFactor(V8HeapTrait::kMinSize * MB));
- CheckEqualRounded(
- 1.600, V8Controller::MaxGrowingFactor(V8HeapTrait::kMaxSize / 2 * MB));
- CheckEqualRounded(
- 1.999, V8Controller::MaxGrowingFactor(
- (V8HeapTrait::kMaxSize - Heap::kPointerMultiplier) * MB));
+ CheckEqualRounded(1.3, V8Controller::MaxGrowingFactor(V8HeapTrait::kMinSize));
+ CheckEqualRounded(1.600,
+ V8Controller::MaxGrowingFactor(V8HeapTrait::kMaxSize / 2));
+ CheckEqualRounded(2.0,
+ V8Controller::MaxGrowingFactor(
+ (V8HeapTrait::kMaxSize - Heap::kPointerMultiplier)));
CheckEqualRounded(4.0, V8Controller::MaxGrowingFactor(
- static_cast<size_t>(V8HeapTrait::kMaxSize) * MB));
+ static_cast<size_t>(V8HeapTrait::kMaxSize)));
}
TEST_F(MemoryControllerTest, OldGenerationAllocationLimit) {
@@ -77,42 +76,36 @@ TEST_F(MemoryControllerTest, OldGenerationAllocationLimit) {
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kDefault));
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kDefault));
factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kSlow));
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kSlow));
factor = Min(factor, V8HeapTrait::kConservativeGrowingFactor);
- EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
- V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kConservative));
+ EXPECT_EQ(
+ static_cast<size_t>(old_gen_size * factor + new_space_capacity),
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, 0u, max_old_generation_size, new_space_capacity,
+ factor, Heap::HeapGrowingMode::kConservative));
factor = V8HeapTrait::kMinGrowingFactor;
EXPECT_EQ(static_cast<size_t>(old_gen_size * factor + new_space_capacity),
V8Controller::CalculateAllocationLimit(
- heap, old_gen_size, max_old_generation_size, new_space_capacity,
- factor, Heap::HeapGrowingMode::kMinimal));
-}
+ heap, old_gen_size, 0u, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kMinimal));
-TEST_F(MemoryControllerTest, MaxOldGenerationSize) {
- uint64_t configurations[][2] = {
- {0, V8HeapTrait::kMinSize},
- {512, V8HeapTrait::kMinSize},
- {1 * GB, 256 * Heap::kPointerMultiplier},
- {2 * static_cast<uint64_t>(GB), 512 * Heap::kPointerMultiplier},
- {4 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize},
- {8 * static_cast<uint64_t>(GB), V8HeapTrait::kMaxSize}};
-
- for (auto configuration : configurations) {
- ASSERT_EQ(configuration[1],
- static_cast<uint64_t>(
- Heap::ComputeMaxOldGenerationSize(configuration[0])));
- }
+ factor = V8HeapTrait::kMinGrowingFactor;
+ size_t min_old_generation_size =
+ 2 * static_cast<size_t>(old_gen_size * factor + new_space_capacity);
+ EXPECT_EQ(
+ min_old_generation_size,
+ V8Controller::CalculateAllocationLimit(
+ heap, old_gen_size, min_old_generation_size, max_old_generation_size,
+ new_space_capacity, factor, Heap::HeapGrowingMode::kMinimal));
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/heap-unittest.cc b/deps/v8/test/unittests/heap/heap-unittest.cc
index fbc384ef1d..048ff5d0a6 100644
--- a/deps/v8/test/unittests/heap/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/heap-unittest.cc
@@ -19,14 +19,75 @@ namespace internal {
using HeapTest = TestWithIsolate;
using HeapWithPointerCompressionTest = TestWithIsolateAndPointerCompression;
-TEST(Heap, SemiSpaceSize) {
+TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
const size_t MB = static_cast<size_t>(i::MB);
+ const size_t KB = static_cast<size_t>(i::KB);
const size_t pm = i::Heap::kPointerMultiplier;
- ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(0u));
- ASSERT_EQ(512u * pm, i::Heap::ComputeMaxSemiSpaceSize(512u * MB));
- ASSERT_EQ(2048u * pm, i::Heap::ComputeMaxSemiSpaceSize(1024u * MB));
- ASSERT_EQ(5120u * pm, i::Heap::ComputeMaxSemiSpaceSize(2024u * MB));
- ASSERT_EQ(8192u * pm, i::Heap::ComputeMaxSemiSpaceSize(4095u * MB));
+ ASSERT_EQ(3 * 512u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(128u * pm * MB));
+ ASSERT_EQ(3 * 2048u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(256u * pm * MB));
+ ASSERT_EQ(3 * 4096u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(512u * pm * MB));
+ ASSERT_EQ(3 * 8192u * pm * KB,
+ i::Heap::YoungGenerationSizeFromOldGenerationSize(1024u * pm * MB));
+}
+
+TEST(Heap, GenerationSizesFromHeapSize) {
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t KB = static_cast<size_t>(i::KB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+ size_t old, young;
+
+ i::Heap::GenerationSizesFromHeapSize(1 * KB, &young, &old);
+ ASSERT_EQ(0u, old);
+ ASSERT_EQ(0u, young);
+
+ i::Heap::GenerationSizesFromHeapSize(1 * KB + 3 * 512u * pm * KB, &young,
+ &old);
+ ASSERT_EQ(1 * KB, old);
+ ASSERT_EQ(3 * 512u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(128 * pm * MB + 3 * 512 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(128u * pm * MB, old);
+ ASSERT_EQ(3 * 512u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(256u * pm * MB + 3 * 2048 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(256u * pm * MB, old);
+ ASSERT_EQ(3 * 2048u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(512u * pm * MB + 3 * 4096 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(512u * pm * MB, old);
+ ASSERT_EQ(3 * 4096u * pm * KB, young);
+
+ i::Heap::GenerationSizesFromHeapSize(1024u * pm * MB + 3 * 8192 * pm * KB,
+ &young, &old);
+ ASSERT_EQ(1024u * pm * MB, old);
+ ASSERT_EQ(3 * 8192u * pm * KB, young);
+}
+
+TEST(Heap, HeapSizeFromPhysicalMemory) {
+ const size_t MB = static_cast<size_t>(i::MB);
+ const size_t pm = i::Heap::kPointerMultiplier;
+
+ // The expected value is old_generation_size + 3 * semi_space_size.
+ ASSERT_EQ(128 * pm * MB + 3 * 512 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(0u));
+ ASSERT_EQ(128 * pm * MB + 3 * 512 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(512u * MB));
+ ASSERT_EQ(256 * pm * MB + 3 * 2048 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(1024u * MB));
+ ASSERT_EQ(512 * pm * MB + 3 * 4096 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(2048u * MB));
+ ASSERT_EQ(
+ 1024 * pm * MB + 3 * 8192 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(4096u) * MB));
+ ASSERT_EQ(
+ 1024 * pm * MB + 3 * 8192 * pm * KB,
+ i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(8192u) * MB));
}
TEST_F(HeapTest, ASLR) {
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
index 7c88f58521..be3ca20938 100644
--- a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -28,7 +28,7 @@ class SimpleTask : public ItemParallelJob::Task {
SimpleTask(Isolate* isolate, bool* did_run)
: ItemParallelJob::Task(isolate), did_run_(did_run) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
ItemParallelJob::Item* item = nullptr;
while ((item = GetItem<ItemParallelJob::Item>()) != nullptr) {
item->MarkFinished();
@@ -58,7 +58,7 @@ class EagerTask : public ItemParallelJob::Task {
public:
explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
SimpleItem* item = nullptr;
while ((item = GetItem<SimpleItem>()) != nullptr) {
item->Process();
@@ -120,7 +120,7 @@ class TaskProcessingOneItem : public ItemParallelJob::Task {
wait_when_done_(wait_when_done),
did_process_an_item_(did_process_an_item) {}
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
SimpleItem* item = GetItem<SimpleItem>();
if (did_process_an_item_) {
@@ -164,7 +164,7 @@ class TaskForDifferentItems : public ItemParallelJob::Task {
processed_b_(processed_b) {}
~TaskForDifferentItems() override = default;
- void RunInParallel() override {
+ void RunInParallel(Runner runner) override {
BaseItem* item = nullptr;
while ((item = GetItem<BaseItem>()) != nullptr) {
item->ProcessItem(this);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index c3aa7de234..3d02db7413 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -650,7 +650,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance));
iterator.Advance();
@@ -658,7 +658,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 5));
iterator.Advance();
@@ -666,7 +666,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 10));
iterator.Advance();
@@ -674,7 +674,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 15));
iterator.Advance();
@@ -683,7 +683,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
CHECK_EQ(iterator.current_bytecode(),
Bytecode::kJumpIfToBooleanFalseConstant);
- CHECK_EQ(iterator.GetConstantForIndexOperand(0),
+ CHECK_EQ(*(iterator.GetConstantForIndexOperand(0, isolate())),
Smi::FromInt(kFarJumpDistance - 20));
iterator.Advance();
}
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index a8907ba62a..b2eedd9807 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -81,7 +81,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
@@ -98,7 +99,8 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
CHECK(!iterator.done());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
index 0f6f0e99b0..466062b563 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc
@@ -184,7 +184,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, AccessesFirst) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), 0);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
}
@@ -331,7 +332,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, RandomAccessValid) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
iterator.GoToIndex(18);
@@ -488,7 +490,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -507,7 +510,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArray) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
offset += Bytecodes::Size(Bytecode::kLdaConstant, OperandScale::kSingle);
++iterator;
@@ -968,7 +972,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 2);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_1);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_1);
ASSERT_TRUE(iterator.IsValid());
--iterator;
@@ -987,7 +992,8 @@ TEST_F(BytecodeArrayRandomIteratorTest, IteratesBytecodeArrayBackwards) {
EXPECT_EQ(iterator.current_index(), 0);
EXPECT_EQ(iterator.current_offset(), offset);
EXPECT_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- EXPECT_EQ(iterator.GetConstantForIndexOperand(0).Number(), heap_num_0);
+ EXPECT_EQ(iterator.GetConstantForIndexOperand(0, isolate())->Number(),
+ heap_num_0);
ASSERT_TRUE(iterator.IsValid());
--iterator;
ASSERT_FALSE(iterator.IsValid());
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 1286591752..cb9a83997e 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -63,6 +63,15 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
+ const Matcher<c::LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
+ return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
+ index_matcher, _, _);
+}
+
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<c::StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@@ -436,7 +445,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
#ifdef V8_COMPRESS_POINTERS
Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -448,7 +457,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
kHeapObjectTag),
LoadSensitivity::kCritical)));
#else
- Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -466,7 +475,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
#if V8_COMPRESS_POINTERS
Matcher<Node*> constant_pool_matcher =
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -480,7 +489,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
LoadSensitivity::kCritical)));
#else
- Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -506,13 +515,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
int offset = 16;
Node* load_field = m.LoadObjectField(object, offset);
#ifdef V8_COMPRESS_POINTERS
- EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoad(
+ EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(), object,
c::IsIntPtrConstant(offset - kHeapObjectTag))));
#else
- EXPECT_THAT(load_field,
- m.IsLoad(MachineType::AnyTagged(), object,
- c::IsIntPtrConstant(offset - kHeapObjectTag)));
+ EXPECT_THAT(load_field, m.IsLoadFromObject(
+ MachineType::AnyTagged(), object,
+ c::IsIntPtrConstant(offset - kHeapObjectTag)));
#endif
}
}
@@ -593,21 +602,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
kSystemPointerSize)));
#ifdef V8_COMPRESS_POINTERS
Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
- m.IsLoad(MachineType::AnyCompressed(), load_function_matcher,
- c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
- kHeapObjectTag)));
+ m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
+ c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+ kHeapObjectTag)));
EXPECT_THAT(load_feedback_vector,
- IsChangeCompressedToTagged(m.IsLoad(
+ IsChangeCompressedToTagged(m.IsLoadFromObject(
MachineType::AnyCompressed(), load_vector_cell_matcher,
c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
#else
- Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
+ Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
MachineType::AnyTagged(), load_function_matcher,
c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
- EXPECT_THAT(
- load_feedback_vector,
- m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
- c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+ EXPECT_THAT(load_feedback_vector,
+ m.IsLoadFromObject(
+ MachineType::AnyTagged(), load_vector_cell_matcher,
+ c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
#endif
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index cec661b468..45ec5b4b7f 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -44,6 +44,10 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<compiler::Node*>& base_matcher,
const Matcher<compiler::Node*>& index_matcher,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Matcher<compiler::Node*> IsLoadFromObject(
+ const Matcher<compiler::LoadRepresentation>& rep_matcher,
+ const Matcher<compiler::Node*>& base_matcher,
+ const Matcher<compiler::Node*>& index_matcher);
Matcher<compiler::Node*> IsStore(
const Matcher<compiler::StoreRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
diff --git a/deps/v8/test/unittests/logging/counters-unittest.cc b/deps/v8/test/unittests/logging/counters-unittest.cc
index 3dfb0ff92f..dd38d80ee4 100644
--- a/deps/v8/test/unittests/logging/counters-unittest.cc
+++ b/deps/v8/test/unittests/logging/counters-unittest.cc
@@ -88,9 +88,6 @@ class RuntimeCallStatsTest : public TestWithNativeContext {
return isolate()->counters()->runtime_call_stats();
}
- // Print current RuntimeCallStats table. For debugging purposes.
- void PrintStats() { stats()->Print(); }
-
RuntimeCallCounterId counter_id() {
return RuntimeCallCounterId::kTestCounter1;
}
@@ -655,6 +652,8 @@ static void CustomCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
} // namespace
TEST_F(RuntimeCallStatsTest, CallbackFunction) {
+ FLAG_allow_natives_syntax = true;
+
RuntimeCallCounter* callback_counter =
stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
@@ -710,9 +709,29 @@ TEST_F(RuntimeCallStatsTest, CallbackFunction) {
EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(100, counter()->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
+
+ // Check that the FunctionCallback tracing also works properly
+ // when the `callback` is called from optimized code.
+ RunJS(
+ "function wrap(o) { return o.callback(); };\n"
+ "%PrepareFunctionForOptimization(wrap);\n"
+ "wrap(custom_object);\n"
+ "wrap(custom_object);\n"
+ "%OptimizeFunctionOnNextCall(wrap);\n"
+ "wrap(custom_object);\n");
+ EXPECT_EQ(4, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4013, callback_counter->count());
+ EXPECT_EQ(4013, counter2()->count());
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 4013, counter2()->time().InMicroseconds());
}
TEST_F(RuntimeCallStatsTest, ApiGetter) {
+ FLAG_allow_natives_syntax = true;
+
RuntimeCallCounter* callback_counter =
stats()->GetCounter(RuntimeCallCounterId::kFunctionCallback);
current_test = this;
@@ -740,7 +759,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
Sleep(100);
RunJS("custom_object.apiGetter;");
}
- PrintStats();
EXPECT_EQ(1, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -754,7 +772,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(kCustomCallbackTime, counter2()->time().InMicroseconds());
RunJS("for (let i = 0; i < 9; i++) { custom_object.apiGetter };");
- PrintStats();
EXPECT_EQ(2, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -767,7 +784,6 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(kCustomCallbackTime * 10, counter2()->time().InMicroseconds());
RunJS("for (let i = 0; i < 4000; i++) { custom_object.apiGetter };");
- PrintStats();
EXPECT_EQ(3, js_counter()->count());
EXPECT_EQ(1, counter()->count());
@@ -779,7 +795,25 @@ TEST_F(RuntimeCallStatsTest, ApiGetter) {
EXPECT_EQ(0, callback_counter->time().InMicroseconds());
EXPECT_EQ(kCustomCallbackTime * 4010, counter2()->time().InMicroseconds());
- PrintStats();
+ // Check that the FunctionCallback tracing also works properly
+ // when the `apiGetter` is called from optimized code.
+ RunJS(
+ "function wrap(o) { return o.apiGetter; };\n"
+ "%PrepareFunctionForOptimization(wrap);\n"
+ "wrap(custom_object);\n"
+ "wrap(custom_object);\n"
+ "%OptimizeFunctionOnNextCall(wrap);\n"
+ "wrap(custom_object);\n");
+
+ EXPECT_EQ(4, js_counter()->count());
+ EXPECT_EQ(1, counter()->count());
+ EXPECT_EQ(4013, callback_counter->count());
+ EXPECT_EQ(4013, counter2()->count());
+
+ EXPECT_EQ(0, js_counter()->time().InMicroseconds());
+ EXPECT_EQ(100, counter()->time().InMicroseconds());
+ EXPECT_EQ(0, callback_counter->time().InMicroseconds());
+ EXPECT_EQ(kCustomCallbackTime * 4013, counter2()->time().InMicroseconds());
}
TEST_F(SnapshotNativeCounterTest, StringAddNative) {
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 38aae33809..a3a6fb22a7 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -2885,5 +2885,68 @@ TEST_F(ValueSerializerTestWithLimitedMemory, FailIfNoMemoryInWriteHostObject) {
EXPECT_TRUE(EvaluateScriptForInput("gotA")->IsFalse());
}
+// We only have basic tests and tests for .stack here, because we have more
+// comprehensive tests as web platform tests.
+TEST_F(ValueSerializerTest, RoundTripError) {
+ Local<Value> value = RoundTripTest("Error('hello')");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> name;
+ Local<Value> message;
+
+ {
+ Context::Scope scope(deserialization_context());
+ EXPECT_EQ(error->GetPrototype(), Exception::Error(String::Empty(isolate()))
+ .As<Object>()
+ ->GetPrototype());
+ }
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("name"))
+ .ToLocal(&name));
+ ASSERT_TRUE(name->IsString());
+ EXPECT_EQ(Utf8Value(name), "Error");
+
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("message"))
+ .ToLocal(&message));
+ ASSERT_TRUE(message->IsString());
+ EXPECT_EQ(Utf8Value(message), "hello");
+}
+
+TEST_F(ValueSerializerTest, DefaultErrorStack) {
+ Local<Value> value =
+ RoundTripTest("function hkalkcow() { return Error(); } hkalkcow();");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ ASSERT_TRUE(stack->IsString());
+ EXPECT_NE(Utf8Value(stack).find("hkalkcow"), std::string::npos);
+}
+
+TEST_F(ValueSerializerTest, ModifiedErrorStack) {
+ Local<Value> value = RoundTripTest("let e = Error(); e.stack = 'hello'; e");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ ASSERT_TRUE(stack->IsString());
+ EXPECT_EQ(Utf8Value(stack), "hello");
+}
+
+TEST_F(ValueSerializerTest, NonStringErrorStack) {
+ Local<Value> value = RoundTripTest("let e = Error(); e.stack = 17; e");
+ ASSERT_TRUE(value->IsObject());
+ Local<Object> error = value.As<Object>();
+
+ Local<Value> stack;
+ ASSERT_TRUE(error->Get(deserialization_context(), StringFromUtf8("stack"))
+ .ToLocal(&stack));
+ EXPECT_TRUE(stack->IsUndefined());
+}
+
} // namespace
} // namespace v8
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index f85b3bf128..8b425542c1 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -73,7 +73,7 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kShouldEagerCompile, shared->StartPosition(), true,
- shared->FunctionLiteralId(isolate), nullptr);
+ shared->function_literal_id(), nullptr);
return new BackgroundCompileTask(
allocator(), outer_parse_info.get(), function_name, function_literal,
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 94209b8b10..614ddba4f5 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -42,11 +42,11 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
isolate->factory()->NewStringFromAsciiChecked("f"),
Builtins::kCompileLazy);
int function_literal_id = 1;
+ shared->set_function_literal_id(function_literal_id);
// Ensure that the function can be compiled lazily.
shared->set_uncompiled_data(
*isolate->factory()->NewUncompiledDataWithoutPreparseData(
- ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length(),
- function_literal_id));
+ ReadOnlyRoots(isolate).empty_string_handle(), 0, source->length()));
// Make sure we have an outer scope info, even though it's empty
shared->set_raw_outer_scope_info_or_feedback_metadata(
ScopeInfo::Empty(isolate));
diff --git a/deps/v8/test/unittests/torque/earley-parser-unittest.cc b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
index 9718a404c9..f44a49b047 100644
--- a/deps/v8/test/unittests/torque/earley-parser-unittest.cc
+++ b/deps/v8/test/unittests/torque/earley-parser-unittest.cc
@@ -68,7 +68,7 @@ struct SimpleArithmeticGrammar : Grammar {
TEST(EarleyParser, SimpleArithmetic) {
SimpleArithmeticGrammar grammar;
- SourceFileMap::Scope source_file_map;
+ SourceFileMap::Scope source_file_map("");
CurrentSourceFile::Scope current_source_file{
SourceFileMap::AddSource("dummy_filename")};
std::string result1 =
diff --git a/deps/v8/test/unittests/torque/ls-message-unittest.cc b/deps/v8/test/unittests/torque/ls-message-unittest.cc
index 06346d32bb..c6779f978d 100644
--- a/deps/v8/test/unittests/torque/ls-message-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-message-unittest.cc
@@ -20,15 +20,19 @@ TEST(LanguageServerMessage, InitializeRequest) {
request.set_method("initialize");
request.params();
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- InitializeResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ InitializeResponse response(std::move(raw_response));
// Check that the response id matches up with the request id, and that
// the language server signals its support for definitions.
EXPECT_EQ(response.id(), 5);
EXPECT_TRUE(response.result().capabilities().definitionProvider());
EXPECT_TRUE(response.result().capabilities().documentSymbolProvider());
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage,
@@ -36,8 +40,10 @@ TEST(LanguageServerMessage,
Request<bool> notification;
notification.set_method("initialized");
- HandleMessage(notification.GetJsonValue(), [](JsonValue& raw_request) {
- RegistrationRequest request(raw_request);
+ bool writer_called = false;
+ HandleMessage(std::move(notification.GetJsonValue()), [&](JsonValue
+ raw_request) {
+ RegistrationRequest request(std::move(raw_request));
ASSERT_EQ(request.method(), "client/registerCapability");
ASSERT_EQ(request.params().registrations_size(), (size_t)1);
@@ -49,26 +55,33 @@ TEST(LanguageServerMessage,
registration
.registerOptions<DidChangeWatchedFilesRegistrationOptions>();
ASSERT_EQ(options.watchers_size(), (size_t)1);
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, GotoDefinitionUnkownFile) {
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
GotoDefinitionRequest request;
request.set_id(42);
request.set_method("textDocument/definition");
request.params().textDocument().set_uri("file:///unknown.tq");
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_TRUE(response.IsNull("result"));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, GotoDefinition) {
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
SourceId test_id = SourceFileMap::AddSource("file://test.tq");
SourceId definition_id = SourceFileMap::AddSource("file://base.tq");
@@ -84,11 +97,15 @@ TEST(LanguageServerMessage, GotoDefinition) {
request.params().position().set_line(2);
request.params().position().set_character(0);
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_TRUE(response.IsNull("result"));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
// Second, check a known defintion.
request = GotoDefinitionRequest();
@@ -98,8 +115,9 @@ TEST(LanguageServerMessage, GotoDefinition) {
request.params().position().set_line(1);
request.params().position().set_character(5);
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- GotoDefinitionResponse response(raw_response);
+ writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ GotoDefinitionResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 43);
ASSERT_FALSE(response.IsNull("result"));
@@ -109,22 +127,26 @@ TEST(LanguageServerMessage, GotoDefinition) {
EXPECT_EQ(location.range().start().character(), 1);
EXPECT_EQ(location.range().end().line(), 4);
EXPECT_EQ(location.range().end().character(), 5);
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, CompilationErrorSendsDiagnostics) {
DiagnosticsFiles::Scope diagnostic_files_scope;
LanguageServerData::Scope server_data_scope;
TorqueMessages::Scope messages_scope;
- SourceFileMap::Scope source_file_map_scope;
+ SourceFileMap::Scope source_file_map_scope("");
TorqueCompilerResult result;
{ Error("compilation failed somehow"); }
result.messages = std::move(TorqueMessages::Get());
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
- PublishDiagnosticsNotification notification(raw_response);
+ bool writer_called = false;
+ CompilationFinished(std::move(result), [&](JsonValue raw_response) {
+ PublishDiagnosticsNotification notification(std::move(raw_response));
EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
ASSERT_FALSE(notification.IsNull("params"));
@@ -134,15 +156,18 @@ TEST(LanguageServerMessage, CompilationErrorSendsDiagnostics) {
Diagnostic diagnostic = notification.params().diagnostics(0);
EXPECT_EQ(diagnostic.severity(), Diagnostic::kError);
EXPECT_EQ(diagnostic.message(), "compilation failed somehow");
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
DiagnosticsFiles::Scope diagnostic_files_scope;
TorqueMessages::Scope messages_scope;
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
- SourceId test_id = SourceFileMap::AddSource("test.tq");
+ SourceFileMap::Scope sourc_file_map_scope("");
+ SourceId test_id = SourceFileMap::AddSource("file://test.tq");
// No compilation errors but two lint warnings.
{
@@ -156,12 +181,13 @@ TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
result.messages = std::move(TorqueMessages::Get());
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
- PublishDiagnosticsNotification notification(raw_response);
+ bool writer_called = false;
+ CompilationFinished(std::move(result), [&](JsonValue raw_response) {
+ PublishDiagnosticsNotification notification(std::move(raw_response));
EXPECT_EQ(notification.method(), "textDocument/publishDiagnostics");
ASSERT_FALSE(notification.IsNull("params"));
- EXPECT_EQ(notification.params().uri(), "test.tq");
+ EXPECT_EQ(notification.params().uri(), "file://test.tq");
ASSERT_EQ(notification.params().diagnostics_size(), static_cast<size_t>(2));
Diagnostic diagnostic1 = notification.params().diagnostics(0);
@@ -171,35 +197,42 @@ TEST(LanguageServerMessage, LintErrorSendsDiagnostics) {
Diagnostic diagnostic2 = notification.params().diagnostics(1);
EXPECT_EQ(diagnostic2.severity(), Diagnostic::kWarning);
EXPECT_EQ(diagnostic2.message(), "lint error 2");
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
TEST(LanguageServerMessage, CleanCompileSendsNoDiagnostics) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
+ SourceFileMap::Scope sourc_file_map_scope("");
TorqueCompilerResult result;
result.source_file_map = SourceFileMap::Get();
- CompilationFinished(std::move(result), [](JsonValue& raw_response) {
+ CompilationFinished(std::move(result), [](JsonValue raw_response) {
FAIL() << "Sending unexpected response!";
});
}
TEST(LanguageServerMessage, NoSymbolsSendsEmptyResponse) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope sourc_file_map_scope;
+ SourceFileMap::Scope sourc_file_map_scope("");
DocumentSymbolRequest request;
request.set_id(42);
request.set_method("textDocument/documentSymbol");
- request.params().textDocument().set_uri("test.tq");
+ request.params().textDocument().set_uri("file://test.tq");
- HandleMessage(request.GetJsonValue(), [](JsonValue& raw_response) {
- DocumentSymbolResponse response(raw_response);
+ bool writer_called = false;
+ HandleMessage(std::move(request.GetJsonValue()), [&](JsonValue raw_response) {
+ DocumentSymbolResponse response(std::move(raw_response));
EXPECT_EQ(response.id(), 42);
EXPECT_EQ(response.result_size(), static_cast<size_t>(0));
+
+ writer_called = true;
});
+ EXPECT_TRUE(writer_called);
}
} // namespace ls
diff --git a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
index ad67bf0f21..7e23f6dcf4 100644
--- a/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
+++ b/deps/v8/test/unittests/torque/ls-server-data-unittest.cc
@@ -13,7 +13,7 @@ namespace torque {
namespace {
struct TestCompiler {
- SourceFileMap::Scope file_map_scope;
+ SourceFileMap::Scope file_map_scope{""};
LanguageServerData::Scope server_data_scope;
void Compile(const std::string& source) {
@@ -23,7 +23,7 @@ struct TestCompiler {
options.force_assert_statements = true;
TorqueCompilerResult result = CompileTorque(source, options);
- SourceFileMap::Get() = result.source_file_map;
+ SourceFileMap::Get() = *result.source_file_map;
LanguageServerData::Get() = std::move(result.language_server_data);
}
};
@@ -42,7 +42,7 @@ TEST(LanguageServer, GotoTypeDefinition) {
compiler.Compile(source);
// Find the definition for type 'T1' of argument 'a' on line 4.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {4, 19});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
@@ -64,7 +64,7 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
compiler.Compile(source);
// Find the definition for 'T1' of the extends clause on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 16});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 7}}));
@@ -72,7 +72,7 @@ TEST(LanguageServer, GotoTypeDefinitionExtends) {
TEST(LanguageServer, GotoTypeDefinitionNoDataForFile) {
LanguageServerData::Scope server_data_scope;
- SourceFileMap::Scope file_scope;
+ SourceFileMap::Scope file_scope("");
SourceId test_id = SourceFileMap::AddSource("test.tq");
// Regression test, this step should not crash.
@@ -94,7 +94,7 @@ TEST(LanguageServer, GotoLabelDefinitionInSignature) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 18});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {5, 19}, {5, 26}}));
@@ -116,7 +116,7 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlock) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 25});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
@@ -133,7 +133,7 @@ TEST(LanguageServer, GotoDefinitionClassSuperType) {
compiler.Compile(source);
// Find the definition for 'Tagged' of the 'extends' on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 33});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 5}, {2, 11}}));
@@ -151,7 +151,7 @@ TEST(LanguageServer, GotoLabelDefinitionInSignatureGotoStmt) {
compiler.Compile(source);
// Find the definition for 'Fail' of the goto statement on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 7});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {2, 26}, {2, 30}}));
@@ -170,7 +170,7 @@ TEST(LanguageServer, GotoLabelDefinitionInTryBlockGoto) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the goto statement on line 3.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {3, 13});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {4, 8}, {4, 15}}));
@@ -192,7 +192,7 @@ TEST(LanguageServer, GotoLabelDefinitionGotoInOtherwise) {
compiler.Compile(source);
// Find the definition for 'Bailout' of the otherwise clause on line 6.
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
auto maybe_position = LanguageServerData::FindDefinition(id, {6, 30});
ASSERT_TRUE(maybe_position.has_value());
EXPECT_EQ(*maybe_position, (SourcePosition{id, {7, 8}, {7, 15}}));
@@ -214,7 +214,7 @@ TEST(LanguageServer, SymbolsArePopulated) {
TestCompiler compiler;
compiler.Compile(source);
- const SourceId id = SourceFileMap::GetSourceId("<torque>");
+ const SourceId id = SourceFileMap::GetSourceId("dummy-filename.tq");
const auto& symbols = LanguageServerData::SymbolsForSourceId(id);
ASSERT_FALSE(symbols.empty());
}
diff --git a/deps/v8/test/unittests/torque/torque-unittest.cc b/deps/v8/test/unittests/torque/torque-unittest.cc
index 9a82498ee4..1366f86ce7 100644
--- a/deps/v8/test/unittests/torque/torque-unittest.cc
+++ b/deps/v8/test/unittests/torque/torque-unittest.cc
@@ -64,6 +64,7 @@ TorqueCompilerResult TestCompileTorque(std::string source) {
options.output_directory = "";
options.collect_language_server_data = false;
options.force_assert_statements = false;
+ options.v8_root = ".";
source = kTestTorquePrelude + source;
return CompileTorque(source, options);
@@ -128,6 +129,7 @@ TEST(Torque, ClassDefinition) {
i: uintptr;
}
+ @export
macro TestClassWithAllTypesLoadsAndStores(
t: TestClassWithAllTypes, r: RawPtr, v1: int8, v2: uint8, v3: int16,
v4: uint16, v5: int32, v6: uint32, v7: intptr, v8: uintptr) {
@@ -158,12 +160,10 @@ TEST(Torque, TypeDeclarationOrder) {
type Baztype = Foo | FooType;
@abstract
- @noVerifier
extern class Foo extends HeapObject {
fooField: FooType;
}
- @noVerifier
extern class Bar extends Foo {
barField: Bartype;
bazfield: Baztype;
@@ -179,7 +179,6 @@ TEST(Torque, ConditionalFields) {
// This class should throw alignment errors if @if decorators aren't
// working.
ExpectSuccessfulCompilation(R"(
- @noVerifier
extern class PreprocessingTest extends HeapObject {
@if(FALSE_FOR_TESTING) a: int8;
@if(TRUE_FOR_TESTING) a: int16;
@@ -192,7 +191,6 @@ TEST(Torque, ConditionalFields) {
}
)");
ExpectFailingCompilation(R"(
- @noVerifier
extern class PreprocessingTest extends HeapObject {
@if(TRUE_FOR_TESTING) a: int8;
@if(FALSE_FOR_TESTING) a: int16;
@@ -209,10 +207,110 @@ TEST(Torque, ConditionalFields) {
TEST(Torque, ConstexprLetBindingDoesNotCrash) {
ExpectFailingCompilation(
- R"(macro FooBar() { let foo = 0; check(foo >= 0); })",
+ R"(@export macro FooBar() { let foo = 0; check(foo >= 0); })",
HasSubstr("Use 'const' instead of 'let' for variable 'foo'"));
}
+TEST(Torque, DoubleUnderScorePrefixIllegalForIdentifiers) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo() {
+ let __x;
+ }
+ )",
+ HasSubstr("Lexer Error"));
+}
+
+TEST(Torque, UnusedLetBindingLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let x: Smi = y;
+ }
+ )",
+ HasSubstr("Variable 'x' is never used."));
+}
+
+TEST(Torque, UnderscorePrefixSilencesUnusedWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let _x: Smi = y;
+ }
+ )");
+}
+
+TEST(Torque, UsingUnderscorePrefixedIdentifierError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi) {
+ let _x: Smi = y;
+ check(_x == y);
+ }
+ )",
+ HasSubstr("Trying to reference '_x'"));
+}
+
+TEST(Torque, UnusedArgumentLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(x: Smi) {}
+ )",
+ HasSubstr("Variable 'x' is never used."));
+}
+
+TEST(Torque, UsingUnderscorePrefixedArgumentSilencesWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(_y: Smi) {}
+ )");
+}
+
+TEST(Torque, UnusedLabelLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo() labels Bar {}
+ )",
+ HasSubstr("Label 'Bar' is never used."));
+}
+
+TEST(Torque, UsingUnderScorePrefixLabelSilencesWarning) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo() labels _Bar {}
+ )");
+}
+
+TEST(Torque, NoUnusedWarningForImplicitArguments) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(implicit c: Context, r: JSReceiver)() {}
+ )");
+}
+
+TEST(Torque, NoUnusedWarningForVariablesOnlyUsedInAsserts) {
+ ExpectSuccessfulCompilation(R"(
+ @export macro Foo(x: bool) {
+ assert(x);
+ }
+ )");
+}
+
+TEST(Torque, ImportNonExistentFile) {
+ ExpectFailingCompilation(R"(import "foo/bar.tq")",
+ HasSubstr("File 'foo/bar.tq' not found."));
+}
+
+TEST(Torque, LetShouldBeConstLintError) {
+ ExpectFailingCompilation(R"(
+ @export macro Foo(y: Smi): Smi {
+ let x: Smi = y;
+ return x;
+ })",
+ HasSubstr("Variable 'x' is never assigned to."));
+}
+
+TEST(Torque, LetShouldBeConstIsSkippedForStructs) {
+ ExpectSuccessfulCompilation(R"(
+ struct Foo{ a: Smi; }
+ @export macro Bar(x: Smi): Foo {
+ let foo = Foo{a: x};
+ return foo;
+ }
+ )");
+}
+
} // namespace torque
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 938956f07d..29cb176197 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -87,7 +87,8 @@ class ControlTransferTest : public TestWithZone {
}
void CheckNoOtherTargets(
- const byte* start, const byte* end, ControlTransferMap& map,
+ const byte* start, const byte* end,
+ ControlTransferMap& map, // NOLINT(runtime/references)
std::initializer_list<ExpectedControlTransfer> targets) {
// Check there are no other control targets.
for (pc_t pc = 0; start + pc < end; pc++) {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index aaf6215a8a..725f7f4a59 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -254,7 +254,7 @@ class TestModuleBuilder {
byte AddTable(ValueType type, uint32_t initial_size, bool has_maximum_size,
uint32_t maximum_size) {
- CHECK(type == kWasmAnyRef || type == kWasmAnyFunc);
+ CHECK(type == kWasmAnyRef || type == kWasmFuncRef);
mod.tables.emplace_back();
WasmTable& table = mod.tables.back();
table.type = type;
@@ -274,6 +274,11 @@ class TestModuleBuilder {
byte AddPassiveElementSegment() {
mod.elem_segments.emplace_back();
+ auto& init = mod.elem_segments.back();
+ // Add 5 empty elements.
+ for (uint32_t j = 0; j < 5; j++) {
+ init.entries.push_back(WasmElemSegment::kNullIndex);
+ }
return static_cast<byte>(mod.elem_segments.size() - 1);
}
@@ -353,7 +358,7 @@ TEST_F(FunctionBodyDecoderTest, Float32Const) {
byte code[] = {kExprF32Const, 0, 0, 0, 0};
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
- WriteLittleEndianValue<float>(ptr, i * -7.75f);
+ base::WriteLittleEndianValue<float>(ptr, i * -7.75f);
ExpectValidates(sigs.f_ff(), code);
}
}
@@ -362,7 +367,7 @@ TEST_F(FunctionBodyDecoderTest, Float64Const) {
byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
Address ptr = reinterpret_cast<Address>(code + 1);
for (int i = 0; i < 30; i++) {
- WriteLittleEndianValue<double>(ptr, i * 33.45);
+ base::WriteLittleEndianValue<double>(ptr, i * 33.45);
ExpectValidates(sigs.d_dd(), code);
}
}
@@ -1637,7 +1642,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectReturnCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ builder.AddTable(kWasmFuncRef, 20, true, 30);
module = builder.module();
byte f0 = builder.AddSignature(sigs.i_v());
@@ -1656,7 +1661,7 @@ TEST_F(FunctionBodyDecoderTest, IndirectReturnCallsOutOfBounds) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
ExpectFailure(sig, {WASM_RETURN_CALL_INDIRECT0(0, WASM_ZERO)});
@@ -1779,7 +1784,7 @@ TEST_F(FunctionBodyDecoderTest, MultiReturnType) {
TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
byte f0 = builder.AddSignature(sigs.i_v());
@@ -1795,7 +1800,7 @@ TEST_F(FunctionBodyDecoderTest, SimpleIndirectCalls) {
TEST_F(FunctionBodyDecoderTest, IndirectCallsOutOfBounds) {
FunctionSig* sig = sigs.i_i();
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyFunc, 20, false, 20);
+ builder.AddTable(kWasmFuncRef, 20, false, 20);
module = builder.module();
ExpectFailure(sig, {WASM_CALL_INDIRECT0(0, WASM_ZERO)});
@@ -2031,97 +2036,97 @@ TEST_F(FunctionBodyDecoderTest, AllSetGlobalCombinations) {
}
}
-TEST_F(FunctionBodyDecoderTest, SetTable) {
+TEST_F(FunctionBodyDecoderTest, TableSet) {
WASM_FEATURE_SCOPE(anyref);
TestModuleBuilder builder;
module = builder.module();
byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
- byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte local_ref = 0;
byte local_func = 1;
byte local_int = 2;
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(6),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(6),
WASM_GET_LOCAL(local_ref))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(5),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(5),
WASM_GET_LOCAL(local_func))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(7),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_func2, WASM_I32V(7),
WASM_GET_LOCAL(local_func))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(8),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref2, WASM_I32V(8),
WASM_GET_LOCAL(local_ref))});
- // We can store anyfunc values as anyref, but not the other way around.
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(4),
+ // We can store funcref values as anyref, but not the other way around.
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(4),
WASM_GET_LOCAL(local_func))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(9),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(9),
WASM_GET_LOCAL(local_ref))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func2, WASM_I32V(3),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func2, WASM_I32V(3),
WASM_GET_LOCAL(local_ref))});
- ExpectValidates(&sig, {WASM_SET_TABLE(tab_ref2, WASM_I32V(2),
+ ExpectValidates(&sig, {WASM_TABLE_SET(tab_ref2, WASM_I32V(2),
WASM_GET_LOCAL(local_func))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_ref1, WASM_I32V(9),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_ref1, WASM_I32V(9),
WASM_GET_LOCAL(local_int))});
- ExpectFailure(&sig, {WASM_SET_TABLE(tab_func1, WASM_I32V(3),
+ ExpectFailure(&sig, {WASM_TABLE_SET(tab_func1, WASM_I32V(3),
WASM_GET_LOCAL(local_int))});
// Out-of-bounds table index should fail.
byte oob_tab = 37;
ExpectFailure(
- &sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(9), WASM_GET_LOCAL(local_ref))});
- ExpectFailure(&sig, {WASM_SET_TABLE(oob_tab, WASM_I32V(3),
+ &sig, {WASM_TABLE_SET(oob_tab, WASM_I32V(9), WASM_GET_LOCAL(local_ref))});
+ ExpectFailure(&sig, {WASM_TABLE_SET(oob_tab, WASM_I32V(3),
WASM_GET_LOCAL(local_func))});
}
-TEST_F(FunctionBodyDecoderTest, GetTable) {
+TEST_F(FunctionBodyDecoderTest, TableGet) {
WASM_FEATURE_SCOPE(anyref);
TestModuleBuilder builder;
module = builder.module();
byte tab_ref1 = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func1 = builder.AddTable(kWasmAnyFunc, 20, true, 30);
- byte tab_func2 = builder.AddTable(kWasmAnyFunc, 10, false, 20);
+ byte tab_func1 = builder.AddTable(kWasmFuncRef, 20, true, 30);
+ byte tab_func2 = builder.AddTable(kWasmFuncRef, 10, false, 20);
byte tab_ref2 = builder.AddTable(kWasmAnyRef, 10, false, 20);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte local_ref = 0;
byte local_func = 1;
byte local_int = 2;
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref1, WASM_I32V(6)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_ref1, WASM_I32V(6)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_ref2, WASM_I32V(8)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_ref2, WASM_I32V(8)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func1, WASM_I32V(5)))});
+ {WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func1, WASM_I32V(5)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_func, WASM_GET_TABLE(tab_func2, WASM_I32V(7)))});
+ {WASM_SET_LOCAL(local_func, WASM_TABLE_GET(tab_func2, WASM_I32V(7)))});
- // We can store anyfunc values as anyref, but not the other way around.
+ // We can store funcref values as anyref, but not the other way around.
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(tab_ref1, WASM_I32V(4)))});
+ WASM_TABLE_GET(tab_ref1, WASM_I32V(4)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func1, WASM_I32V(9)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_func1, WASM_I32V(9)))});
ExpectValidates(
&sig,
- {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(tab_func2, WASM_I32V(3)))});
+ {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(tab_func2, WASM_I32V(3)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(tab_ref2, WASM_I32V(2)))});
+ WASM_TABLE_GET(tab_ref2, WASM_I32V(2)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_int,
- WASM_GET_TABLE(tab_ref1, WASM_I32V(9)))});
+ WASM_TABLE_GET(tab_ref1, WASM_I32V(9)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(
- local_int, WASM_GET_TABLE(tab_func1, WASM_I32V(3)))});
+ local_int, WASM_TABLE_GET(tab_func1, WASM_I32V(3)))});
// Out-of-bounds table index should fail.
byte oob_tab = 37;
ExpectFailure(
- &sig, {WASM_SET_LOCAL(local_ref, WASM_GET_TABLE(oob_tab, WASM_I32V(9)))});
+ &sig, {WASM_SET_LOCAL(local_ref, WASM_TABLE_GET(oob_tab, WASM_I32V(9)))});
ExpectFailure(&sig, {WASM_SET_LOCAL(local_func,
- WASM_GET_TABLE(oob_tab, WASM_I32V(3)))});
+ WASM_TABLE_GET(oob_tab, WASM_I32V(3)))});
}
TEST_F(FunctionBodyDecoderTest, MultiTableCallIndirect) {
@@ -2129,13 +2134,13 @@ TEST_F(FunctionBodyDecoderTest, MultiTableCallIndirect) {
TestModuleBuilder builder;
module = builder.module();
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
- byte tab_func = builder.AddTable(kWasmAnyFunc, 20, true, 30);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 20, true, 30);
- ValueType sig_types[]{kWasmAnyRef, kWasmAnyFunc, kWasmI32};
+ ValueType sig_types[]{kWasmAnyRef, kWasmFuncRef, kWasmI32};
FunctionSig sig(0, 3, sig_types);
byte sig_index = builder.AddSignature(sigs.i_v());
- // We can store anyfunc values as anyref, but not the other way around.
+ // We can store funcref values as anyref, but not the other way around.
ExpectValidates(sigs.i_v(),
{kExprI32Const, 0, kExprCallIndirect, sig_index, tab_func});
@@ -2154,7 +2159,7 @@ TEST_F(FunctionBodyDecoderTest, WasmMemoryGrow) {
}
TEST_F(FunctionBodyDecoderTest, AsmJsMemoryGrow) {
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
@@ -2186,7 +2191,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsBinOpsCheckOrigin) {
};
{
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsBinOps); i++) {
@@ -2234,7 +2239,7 @@ TEST_F(FunctionBodyDecoderTest, AsmJsUnOpsCheckOrigin) {
{kExprI32AsmjsSConvertF64, sigs.i_d()},
{kExprI32AsmjsUConvertF64, sigs.i_d()}};
{
- TestModuleBuilder builder(kAsmJsOrigin);
+ TestModuleBuilder builder(kAsmJsSloppyOrigin);
module = builder.module();
builder.InitializeMemory();
for (size_t i = 0; i < arraysize(AsmJsUnOps); i++) {
@@ -3104,7 +3109,7 @@ TEST_F(FunctionBodyDecoderTest, MemoryInit) {
ExpectValidates(sigs.v_v(),
{WASM_MEMORY_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, MemoryInitInvalid) {
@@ -3174,12 +3179,12 @@ TEST_F(FunctionBodyDecoderTest, TableInit) {
module = builder.module();
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_INIT(0, 1, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
@@ -3189,7 +3194,8 @@ TEST_F(FunctionBodyDecoderTest, TableInitInvalid) {
module = builder.module();
WASM_FEATURE_SCOPE(bulk_memory);
- byte code[] = {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO), WASM_END};
+ byte code[] = {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO),
+ WASM_END};
for (size_t i = 0; i <= arraysize(code); ++i) {
Validate(i == arraysize(code), sigs.v_v(), VectorOf(code, i), kOmitEnd);
}
@@ -3212,15 +3218,16 @@ TEST_F(FunctionBodyDecoderTest, TableCopy) {
builder.InitializeTable();
module = builder.module();
- ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ ExpectFailure(sigs.v_v(),
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
WASM_FEATURE_SCOPE(bulk_memory);
ExpectValidates(sigs.v_v(),
- {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
}
TEST_F(FunctionBodyDecoderTest, TableGrow) {
TestModuleBuilder builder;
- byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
module = builder.module();
@@ -3232,10 +3239,10 @@ TEST_F(FunctionBodyDecoderTest, TableGrow) {
{WASM_TABLE_GROW(tab_func, WASM_REF_NULL, WASM_ONE)});
ExpectValidates(sigs.i_r(),
{WASM_TABLE_GROW(tab_ref, WASM_REF_NULL, WASM_ONE)});
- // Anyfunc table cannot be initialized with an anyref value.
+ // FuncRef table cannot be initialized with an anyref value.
ExpectFailure(sigs.i_r(),
{WASM_TABLE_GROW(tab_func, WASM_GET_LOCAL(0), WASM_ONE)});
- // Anyref table can be initialized with an anyfunc value.
+ // Anyref table can be initialized with a funcref value.
ExpectValidates(sigs.i_a(),
{WASM_TABLE_GROW(tab_ref, WASM_GET_LOCAL(0), WASM_ONE)});
// Check that the table index gets verified.
@@ -3245,7 +3252,7 @@ TEST_F(FunctionBodyDecoderTest, TableGrow) {
TEST_F(FunctionBodyDecoderTest, TableSize) {
TestModuleBuilder builder;
- int tab = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ int tab = builder.AddTable(kWasmFuncRef, 10, true, 20);
module = builder.module();
@@ -3257,7 +3264,7 @@ TEST_F(FunctionBodyDecoderTest, TableSize) {
TEST_F(FunctionBodyDecoderTest, TableFill) {
TestModuleBuilder builder;
- byte tab_func = builder.AddTable(kWasmAnyFunc, 10, true, 20);
+ byte tab_func = builder.AddTable(kWasmFuncRef, 10, true, 20);
byte tab_ref = builder.AddTable(kWasmAnyRef, 10, true, 20);
module = builder.module();
@@ -3269,10 +3276,10 @@ TEST_F(FunctionBodyDecoderTest, TableFill) {
WASM_REF_NULL, WASM_ONE)});
ExpectValidates(sigs.v_r(), {WASM_TABLE_FILL(tab_ref, WASM_ONE, WASM_REF_NULL,
WASM_ONE)});
- // Anyfunc table cannot be initialized with an anyref value.
+ // FuncRef table cannot be initialized with an anyref value.
ExpectFailure(sigs.v_r(), {WASM_TABLE_FILL(tab_func, WASM_ONE,
WASM_GET_LOCAL(0), WASM_ONE)});
- // Anyref table can be initialized with an anyfunc value.
+ // Anyref table can be initialized with a funcref value.
ExpectValidates(sigs.v_a(), {WASM_TABLE_FILL(tab_ref, WASM_ONE,
WASM_GET_LOCAL(0), WASM_ONE)});
// Check that the table index gets verified.
@@ -3282,7 +3289,7 @@ TEST_F(FunctionBodyDecoderTest, TableFill) {
TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
TestModuleBuilder builder;
- builder.AddTable(kWasmAnyRef, 10, true, 20);
+ module = builder.module();
{
WASM_FEATURE_SCOPE(anyref);
ExpectFailure(sigs.i_v(), {WASM_TABLE_GROW(0, WASM_REF_NULL, WASM_ONE)});
@@ -3294,10 +3301,93 @@ TEST_F(FunctionBodyDecoderTest, TableOpsWithoutTable) {
WASM_FEATURE_SCOPE(bulk_memory);
builder.AddPassiveElementSegment();
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_INIT(0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
- ExpectFailure(sigs.v_v(), {WASM_ELEM_DROP(0)});
+ {WASM_TABLE_INIT(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
ExpectFailure(sigs.v_v(),
- {WASM_TABLE_COPY(WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ {WASM_TABLE_COPY(0, 0, WASM_ZERO, WASM_ZERO, WASM_ZERO)});
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, TableCopyMultiTable) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added one table, therefore table.copy on table 0 should work.
+ int table_src = 0;
+ int table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ // There is only one table, so table.copy on table 1 should fail.
+ table_src = 0;
+ table_dst = 1;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ table_src = 1;
+ table_dst = 0;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ }
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added two tables, therefore table.copy on table 0 should work.
+ int table_src = 0;
+ int table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ // Also table.copy on table 1 should work now.
+ table_src = 1;
+ table_dst = 0;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ table_src = 0;
+ table_dst = 1;
+ ExpectValidates(sigs.v_v(),
+ {WASM_TABLE_COPY(table_dst, table_src, WASM_ZERO, WASM_ZERO,
+ WASM_ZERO)});
+ }
+}
+
+TEST_F(FunctionBodyDecoderTest, TableInitMultiTable) {
+ WASM_FEATURE_SCOPE(bulk_memory);
+ WASM_FEATURE_SCOPE(anyref);
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added one table, therefore table.init on table 0 should work.
+ int table_index = 0;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ // There is only one table, so table.init on table 1 should fail.
+ table_index = 1;
+ ExpectFailure(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ }
+ {
+ TestModuleBuilder builder;
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddTable(kWasmAnyRef, 10, true, 20);
+ builder.AddPassiveElementSegment();
+ module = builder.module();
+ // We added two tables, therefore table.init on table 0 should work.
+ int table_index = 0;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
+ // Also table.init on table 1 should work now.
+ table_index = 1;
+ ExpectValidates(sigs.v_v(), {WASM_TABLE_INIT(table_index, 0, WASM_ZERO,
+ WASM_ZERO, WASM_ZERO)});
}
}
@@ -3628,9 +3718,9 @@ TEST_F(LocalDeclDecoderTest, UseEncoder) {
pos = ExpectRun(map, pos, kWasmI64, 212);
}
-TEST_F(LocalDeclDecoderTest, ExceptRef) {
+TEST_F(LocalDeclDecoderTest, ExnRef) {
WASM_FEATURE_SCOPE(eh);
- ValueType type = kWasmExceptRef;
+ ValueType type = kWasmExnRef;
const byte data[] = {1, 1,
static_cast<byte>(ValueTypes::ValueTypeCodeFor(type))};
BodyLocalDecls decls(zone());
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index d63819ba70..4493fcf1dd 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -167,7 +167,7 @@ struct ValueTypePair {
{kLocalI64, kWasmI64}, // --
{kLocalF32, kWasmF32}, // --
{kLocalF64, kWasmF64}, // --
- {kLocalAnyFunc, kWasmAnyFunc}, // --
+ {kLocalFuncRef, kWasmFuncRef}, // --
{kLocalAnyRef, kWasmAnyRef} // --
};
@@ -306,7 +306,7 @@ TEST_F(WasmModuleVerifyTest, AnyRefGlobal) {
}
}
-TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
+TEST_F(WasmModuleVerifyTest, FuncRefGlobal) {
WASM_FEATURE_SCOPE(anyref);
static const byte data[] = {
// sig#0 ---------------------------------------------------------------
@@ -315,10 +315,10 @@ TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
TWO_EMPTY_FUNCTIONS(SIG_INDEX(0)),
SECTION(Global, // --
ENTRY_COUNT(2), // --
- kLocalAnyFunc, // local type
+ kLocalFuncRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_NULL, // init
- kLocalAnyFunc, // local type
+ kLocalFuncRef, // local type
0, // immutable
WASM_INIT_EXPR_REF_FUNC(1)), // init
TWO_EMPTY_BODIES};
@@ -331,12 +331,12 @@ TEST_F(WasmModuleVerifyTest, AnyFuncGlobal) {
EXPECT_EQ(0u, result.value()->data_segments.size());
const WasmGlobal* global = &result.value()->globals[0];
- EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_EQ(kWasmFuncRef, global->type);
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kRefNullConst, global->init.kind);
global = &result.value()->globals[1];
- EXPECT_EQ(kWasmAnyFunc, global->type);
+ EXPECT_EQ(kWasmFuncRef, global->type);
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kRefFuncConst, global->init.kind);
EXPECT_EQ(uint32_t{1}, global->init.val.function_index);
@@ -439,7 +439,9 @@ TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
}
}
-static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
+static void AppendUint32v(
+ std::vector<byte>& buffer, // NOLINT(runtime/references)
+ uint32_t val) {
while (true) {
uint32_t next = val >> 7;
uint32_t out = val & 0x7F;
@@ -920,7 +922,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// code ----------------------------------------------------------------
ONE_EMPTY_BODY};
@@ -937,7 +939,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
TEST_F(WasmModuleVerifyTest, ElementSectionWithInternalTable) {
static const byte data[] = {
// table ---------------------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element, ENTRY_COUNT(0))};
@@ -951,7 +953,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionWithImportedTable) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
1), // initial size
// elements ------------------------------------------------------------
@@ -982,7 +984,7 @@ TEST_F(WasmModuleVerifyTest, Regression_735887) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1001,7 +1003,7 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1029,7 +1031,7 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
// funcs ------------------------------------------------------
FOUR_EMPTY_FUNCTIONS(SIG_INDEX(0)),
// table declaration -------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 8),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 8),
// table elements ----------------------------------------------
SECTION(Element,
ENTRY_COUNT(1), // entry count
@@ -1059,8 +1061,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTables) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 5, // table 0
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(2), // entry count
@@ -1092,21 +1094,21 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTables) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
ADD_COUNT('s'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
10), // initial size
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
4, // entry count
@@ -1146,8 +1148,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMultipleTablesArbitraryOrder) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 5, // table 0
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(3), // entry count
@@ -1183,21 +1185,21 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
ADD_COUNT('s'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
10), // initial size
// funcs ---------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
4, // entry count
@@ -1225,7 +1227,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionMixedTablesArbitraryOrder) {
EXPECT_VERIFIES(data);
}
-TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithAnyFunc) {
+TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithFuncRef) {
WASM_FEATURE_SCOPE(anyref);
WASM_FEATURE_SCOPE(bulk_memory);
static const byte data[] = {
@@ -1236,7 +1238,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionInitAnyRefTableWithAnyFunc) {
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
kLocalAnyRef, 0, 5, // table 0
- kLocalAnyFunc, 0, 9), // table 1
+ kLocalFuncRef, 0, 9), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(2), // entry count
@@ -1269,7 +1271,7 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
ADD_COUNT('m'), // module name
ADD_COUNT('t'), // table name
kExternalTable, // import kind
- kLocalAnyFunc, // elem_type
+ kLocalFuncRef, // elem_type
0, // no maximum field
5, // initial size
ADD_COUNT('m'), // module name
@@ -1282,8 +1284,8 @@ TEST_F(WasmModuleVerifyTest, ElementSectionDontInitAnyRefImportedTable) {
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration ---------------------------------------------------
SECTION(Table, ENTRY_COUNT(2), // section header
- kLocalAnyFunc, 0, 15, // table 0
- kLocalAnyFunc, 0, 19), // table 1
+ kLocalFuncRef, 0, 15, // table 0
+ kLocalFuncRef, 0, 19), // table 1
// elements ------------------------------------------------------------
SECTION(Element,
ENTRY_COUNT(4), // entry count
@@ -1327,10 +1329,10 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithoutFlag) {
static const byte data[] = {
SECTION(Table, // table section
ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
+ kLocalFuncRef, // table 1: type
0, // table 1: no maximum
10, // table 1: minimum size
- kLocalAnyFunc, // table 2: type
+ kLocalFuncRef, // table 2: type
0, // table 2: no maximum
10), // table 2: minimum size
};
@@ -1342,7 +1344,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
static const byte data[] = {
SECTION(Table, // table section
ENTRY_COUNT(2), // 2 tables
- kLocalAnyFunc, // table 1: type
+ kLocalFuncRef, // table 1: type
0, // table 1: no maximum
10, // table 1: minimum size
kLocalAnyRef, // table 2: type
@@ -1356,7 +1358,7 @@ TEST_F(WasmModuleVerifyTest, MultipleTablesWithFlag) {
EXPECT_EQ(2u, result.value()->tables.size());
EXPECT_EQ(10u, result.value()->tables[0].initial_size);
- EXPECT_EQ(kWasmAnyFunc, result.value()->tables[0].type);
+ EXPECT_EQ(kWasmFuncRef, result.value()->tables[0].type);
EXPECT_EQ(11u, result.value()->tables[1].initial_size);
EXPECT_EQ(kWasmAnyRef, result.value()->tables[1].type);
@@ -1544,7 +1546,7 @@ TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
TEST_F(WasmSignatureDecodeTest, Fail_anyref_without_flag) {
// Disable AnyRef support and check that decoding fails.
WASM_FEATURE_SCOPE_VAL(anyref, false);
- byte ref_types[] = {kLocalAnyFunc, kLocalAnyRef};
+ byte ref_types[] = {kLocalFuncRef, kLocalAnyRef};
for (byte invalid_type : ref_types) {
for (size_t i = 0;; i++) {
byte data[] = {SIG_ENTRY_x_xx(kLocalI32, kLocalI32, kLocalI32)};
@@ -2350,9 +2352,9 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegment) {
// funcs -----------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration -----------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
- SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyFunc, U32V_1(3),
+ SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalFuncRef, U32V_1(3),
REF_FUNC_ELEMENT(0), REF_FUNC_ELEMENT(0), REF_NULL_ELEMENT),
// code ------------------------------------------------------------------
ONE_EMPTY_BODY};
@@ -2369,7 +2371,7 @@ TEST_F(WasmModuleVerifyTest, PassiveElementSegmentAnyRef) {
// funcs -----------------------------------------------------------------
ONE_EMPTY_FUNCTION(SIG_INDEX(0)),
// table declaration -----------------------------------------------------
- SECTION(Table, ENTRY_COUNT(1), kLocalAnyFunc, 0, 1),
+ SECTION(Table, ENTRY_COUNT(1), kLocalFuncRef, 0, 1),
// element segments -----------------------------------------------------
SECTION(Element, ENTRY_COUNT(1), PASSIVE, kLocalAnyRef, U32V_1(0)),
// code ------------------------------------------------------------------
diff --git a/deps/v8/test/unittests/wasm/wasm-text-unittest.cc b/deps/v8/test/unittests/wasm/wasm-text-unittest.cc
new file mode 100644
index 0000000000..e960b730a5
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/wasm-text-unittest.cc
@@ -0,0 +1,121 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
+#include "test/common/wasm/test-signatures.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmTextTest : public TestWithIsolateAndZone {
+ public:
+ TestSignatures sigs;
+ WasmFeatures enabled_features_;
+
+ void TestInstruction(const byte* func_start, size_t func_size) {
+ WasmModuleBuilder mb(zone());
+ auto* fb = mb.AddFunction(sigs.v_v());
+ fb->EmitCode(func_start, static_cast<uint32_t>(func_size));
+ fb->Emit(kExprEnd);
+
+ ZoneBuffer buffer(zone());
+ mb.WriteTo(&buffer);
+
+ ModuleWireBytes wire_bytes(
+ Vector<const byte>(buffer.begin(), buffer.size()));
+
+ ModuleResult result = DecodeWasmModule(
+ enabled_features_, buffer.begin(), buffer.end(), false, kWasmOrigin,
+ isolate()->counters(), isolate()->wasm_engine()->allocator());
+ EXPECT_TRUE(result.ok());
+
+ std::stringstream ss;
+ PrintWasmText(result.value().get(), wire_bytes, 0, ss, nullptr);
+ }
+};
+
+TEST_F(WasmTextTest, EveryOpcodeCanBeDecoded) {
+ static const struct {
+ WasmOpcode opcode;
+ const char* debug_name;
+ } kValues[] = {
+#define DECLARE_ELEMENT(name, opcode, sig) {kExpr##name, "kExpr" #name},
+ FOREACH_OPCODE(DECLARE_ELEMENT)};
+#undef DECLARE_ELEMENT
+
+ for (const auto& value : kValues) {
+ // Pad with 0 for any immediate values. If they're not needed, they'll be
+ // interpreted as unreachable.
+ byte data[20] = {0};
+
+ printf("%s\n", value.debug_name);
+ switch (value.opcode) {
+ // Instructions that have a special case because they affect the control
+ // depth.
+ case kExprBlock:
+ case kExprLoop:
+ case kExprIf:
+ case kExprTry:
+ data[0] = value.opcode;
+ data[1] = kLocalVoid;
+ data[2] = kExprEnd;
+ break;
+ case kExprElse:
+ data[0] = kExprIf;
+ data[1] = value.opcode;
+ data[2] = kExprEnd;
+ break;
+ case kExprCatch:
+ data[0] = kExprTry;
+ data[1] = value.opcode;
+ data[2] = kExprEnd;
+ break;
+ case kExprEnd:
+ break;
+
+ // Instructions with special requirements for immediates.
+ case kExprSelectWithType:
+ data[0] = kExprSelectWithType;
+ data[1] = 1;
+ data[2] = kLocalI32;
+ break;
+
+ default: {
+ if (value.opcode >= 0x100) {
+ data[0] = value.opcode >> 8; // Prefix byte.
+ byte opcode = value.opcode & 0xff; // Actual opcode.
+ if (opcode >= 0x80) {
+ // Opcode with prefix, and needs to be LEB encoded (3 bytes).
+ // For now, this can only be in the range [0x80, 0xff], which means
+ // that the third byte is always 1.
+ data[1] = (opcode & 0x7f) | 0x80;
+ data[2] = 1;
+ } else {
+ // Opcode with prefix (2 bytes).
+ data[1] = opcode;
+ }
+ } else {
+ // Single-byte opcode.
+ data[0] = value.opcode;
+ }
+ break;
+ }
+ }
+
+ TestInstruction(data, arraysize(data));
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8