Diffstat (limited to 'deps/v8/test/cctest/compiler/test-run-machops.cc')
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc  815
1 file changed, 503 insertions(+), 312 deletions(-)
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 2bfe1244be..50b46d7d0e 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -7,8 +7,10 @@
#include <limits>
#include "src/base/bits.h"
+#include "src/base/ieee754.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
+#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
@@ -28,6 +30,39 @@ TEST(RunInt32Add) {
CHECK_EQ(1, m.Call());
}
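+// Builds (shift_left << shift_right) and (add_left + add_right), then combines
+// them with Int32Add in both operand orders. The combined node is named "lsa",
+// presumably after the MIPS LSA (shift-and-add) instruction such a pattern can
+// be selected into on supporting targets.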
+static int RunInt32AddShift(bool is_left, int32_t add_left, int32_t add_right,
+ int32_t shift_left, int32_t shift_right) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* shift =
+ m.Word32Shl(m.Int32Constant(shift_left), m.Int32Constant(shift_right));
+ Node* add = m.Int32Add(m.Int32Constant(add_left), m.Int32Constant(add_right));
+ Node* lsa = is_left ? m.Int32Add(shift, add) : m.Int32Add(add, shift);
+ m.Return(lsa);
+ return m.Call();
+}
+
+TEST(RunInt32AddShift) {
+ struct Test_case {
+ int32_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT_MAX - 2, 1, 1, 1, INT_MIN}, // INT_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+ for (size_t i = 0; i < tc_size; ++i) {
+ CHECK_EQ(tc[i].expected,
+ RunInt32AddShift(false, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
+ CHECK_EQ(tc[i].expected,
+ RunInt32AddShift(true, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
+ }
+}
TEST(RunWord32ReverseBits) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
@@ -47,6 +82,23 @@ TEST(RunWord32ReverseBits) {
CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
}
+TEST(RunWord32ReverseBytes) {
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ if (!m.machine()->Word32ReverseBytes().IsSupported()) {
+ // We can only test the operator if it exists on the testing platform.
+ return;
+ }
+ m.Return(m.AddNode(m.machine()->Word32ReverseBytes().op(), m.Parameter(0)));
+
+ CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
+ CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x78563412)));
+ CHECK_EQ(uint32_t(0xfedcba09), m.Call(uint32_t(0x09badcfe)));
+ CHECK_EQ(uint32_t(0x01010101), m.Call(uint32_t(0x01010101)));
+ CHECK_EQ(uint32_t(0x01020408), m.Call(uint32_t(0x08040201)));
+ CHECK_EQ(uint32_t(0xf0703010), m.Call(uint32_t(0x103070f0)));
+ CHECK_EQ(uint32_t(0x1f8d0a3a), m.Call(uint32_t(0x3a0a8d1f)));
+ CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
+}
TEST(RunWord32Ctz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
@@ -169,6 +221,23 @@ TEST(RunWord64ReverseBits) {
CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
}
+TEST(RunWord64ReverseBytes) {
+ BufferedRawMachineAssemblerTester<uint64_t> m(MachineType::Uint64());
+ if (!m.machine()->Word64ReverseBytes().IsSupported()) {
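+ // We can only test the operator if it exists on the testing platform.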
+ return;
+ }
+
+ m.Return(m.AddNode(m.machine()->Word64ReverseBytes().op(), m.Parameter(0)));
+
+ CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
+ CHECK_EQ(uint64_t(0x1234567890abcdef), m.Call(uint64_t(0xefcdab9078563412)));
+ CHECK_EQ(uint64_t(0xfedcba0987654321), m.Call(uint64_t(0x2143658709badcfe)));
+ CHECK_EQ(uint64_t(0x0101010101010101), m.Call(uint64_t(0x0101010101010101)));
+ CHECK_EQ(uint64_t(0x0102040803060c01), m.Call(uint64_t(0x010c060308040201)));
+ CHECK_EQ(uint64_t(0xf0703010e060200f), m.Call(uint64_t(0x0f2060e0103070f0)));
+ CHECK_EQ(uint64_t(0x2f8a6df01c21fa3b), m.Call(uint64_t(0x3bfa211cf06d8a2f)));
+ CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
+}
TEST(RunWord64Clz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
@@ -636,6 +705,38 @@ TEST(RunInt64SubWithOverflowInBranchP) {
}
}
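+// 64-bit counterpart of RunInt32AddShift above; the combined node is named
+// "dlsa", presumably after the MIPS64 DLSA (doubleword shift-and-add)
+// instruction.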
+static int64_t RunInt64AddShift(bool is_left, int64_t add_left,
+ int64_t add_right, int64_t shift_left,
+ int64_t shift_right) {
+ RawMachineAssemblerTester<int64_t> m;
+ Node* shift = m.Word64Shl(m.Int64Constant(shift_left),
+ m.Int64Constant(shift_right));
+ Node* add = m.Int64Add(m.Int64Constant(add_left), m.Int64Constant(add_right));
+ Node* dlsa = is_left ? m.Int64Add(shift, add) : m.Int64Add(add, shift);
+ m.Return(dlsa);
+ return m.Call();
+}
+
+TEST(RunInt64AddShift) {
+ struct Test_case {
+ int64_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT64_MAX - 2, 1, 1, 1,
+ INT64_MIN}, // INT64_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+ for (size_t i = 0; i < tc_size; ++i) {
+ CHECK_EQ(tc[i].expected,
+ RunInt64AddShift(false, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
+ CHECK_EQ(tc[i].expected,
+ RunInt64AddShift(true, tc[i].add_left, tc[i].add_right,
+ tc[i].shift_left, tc[i].shift_right));
+ }
+}
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TARGET_ARCH_64_BIT
@@ -1142,94 +1243,6 @@ TEST(RunSwitch4) {
}
-TEST(RunLoadInt32) {
- RawMachineAssemblerTester<int32_t> m;
-
- int32_t p1 = 0; // loads directly from this location.
- m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
-
- FOR_INT32_INPUTS(i) {
- p1 = *i;
- CHECK_EQ(p1, m.Call());
- }
-}
-
-
-TEST(RunLoadInt32Offset) {
- int32_t p1 = 0; // loads directly from this location.
-
- int32_t offsets[] = {-2000000, -100, -101, 1, 3,
- 7, 120, 2000, 2000000000, 0xff};
-
- for (size_t i = 0; i < arraysize(offsets); i++) {
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = offsets[i];
- byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
- // generate load [#base + #index]
- m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
-
- FOR_INT32_INPUTS(j) {
- p1 = *j;
- CHECK_EQ(p1, m.Call());
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat32Offset) {
- float p1 = 0.0f; // loads directly from this location.
- float p2 = 0.0f; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT32_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat64Offset) {
- double p1 = 0; // loads directly from this location.
- double p2 = 0; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT64_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
TEST(RunInt32AddP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
@@ -1709,7 +1722,6 @@ TEST(RunInt32SubP) {
}
}
-
TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
@@ -1733,6 +1745,11 @@ TEST(RunInt32SubImm) {
}
}
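+// Minimal check of Int32Sub with two immediate operands (-1 - 0); presumably
+// this guards the handling of a negative immediate result.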
+TEST(RunInt32SubImm2) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.Int32Constant(-1), r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunInt32SubAndWord32SarP) {
{
@@ -2099,7 +2116,6 @@ TEST(RunInt32MulImm) {
}
}
-
TEST(RunInt32MulAndInt32AddP) {
{
FOR_INT32_INPUTS(i) {
@@ -3566,92 +3582,6 @@ TEST(RunDeadInt32Binops) {
}
-template <typename Type>
-static void RunLoadImmIndex(MachineType rep) {
- const int kNumElems = 3;
- Type buffer[kNumElems];
-
- // initialize the buffer with some raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- // Test with various large and small offsets.
- for (int offset = -1; offset <= 200000; offset *= -5) {
- for (int i = 0; i < kNumElems; i++) {
- BufferedRawMachineAssemblerTester<Type> m;
- Node* base = m.PointerConstant(buffer - offset);
- Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
- m.Return(m.Load(rep, base, index));
-
- volatile Type expected = buffer[i];
- volatile Type actual = m.Call();
- CHECK_EQ(expected, actual);
- }
- }
-}
-
-
-TEST(RunLoadImmIndex) {
- RunLoadImmIndex<int8_t>(MachineType::Int8());
- RunLoadImmIndex<uint8_t>(MachineType::Uint8());
- RunLoadImmIndex<int16_t>(MachineType::Int16());
- RunLoadImmIndex<uint16_t>(MachineType::Uint16());
- RunLoadImmIndex<int32_t>(MachineType::Int32());
- RunLoadImmIndex<uint32_t>(MachineType::Uint32());
- RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
- RunLoadImmIndex<float>(MachineType::Float32());
- RunLoadImmIndex<double>(MachineType::Float64());
- if (kPointerSize == 8) {
- RunLoadImmIndex<int64_t>(MachineType::Int64());
- }
- // TODO(titzer): test various indexing modes.
-}
-
-
-template <typename CType>
-static void RunLoadStore(MachineType rep) {
- const int kNumElems = 4;
- CType buffer[kNumElems];
-
- for (int32_t x = 0; x < kNumElems; x++) {
- int32_t y = kNumElems - x - 1;
- // initialize the buffer with raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- RawMachineAssemblerTester<int32_t> m;
- int32_t OK = 0x29000 + x;
- Node* base = m.PointerConstant(buffer);
- Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
- Node* load = m.Load(rep, base, index0);
- Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
- m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
- m.Return(m.Int32Constant(OK));
-
- CHECK(buffer[x] != buffer[y]);
- CHECK_EQ(OK, m.Call());
- CHECK(buffer[x] == buffer[y]);
- }
-}
-
-
-TEST(RunLoadStore) {
- RunLoadStore<int8_t>(MachineType::Int8());
- RunLoadStore<uint8_t>(MachineType::Uint8());
- RunLoadStore<int16_t>(MachineType::Int16());
- RunLoadStore<uint16_t>(MachineType::Uint16());
- RunLoadStore<int32_t>(MachineType::Int32());
- RunLoadStore<uint32_t>(MachineType::Uint32());
- RunLoadStore<void*>(MachineType::AnyTagged());
- RunLoadStore<float>(MachineType::Float32());
- RunLoadStore<double>(MachineType::Float64());
-}
-
-
TEST(RunFloat32Add) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
MachineType::Float32());
@@ -3673,6 +3603,11 @@ TEST(RunFloat32Sub) {
}
}
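+// The expected value is written as (-0.0f - x); for x == 0.0f this yields
+// -0.0f, so the sign of a negated zero is covered as well.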
+TEST(RunFloat32Neg) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Float32());
+ m.Return(m.AddNode(m.machine()->Float32Neg(), m.Parameter(0)));
+ FOR_FLOAT32_INPUTS(i) { CHECK_FLOAT_EQ(-0.0f - *i, m.Call(*i)); }
+}
TEST(RunFloat32Mul) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
@@ -3717,6 +3652,11 @@ TEST(RunFloat64Sub) {
}
}
+TEST(RunFloat64Neg) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.AddNode(m.machine()->Float64Neg(), m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(-0.0 - *i, m.Call(*i)); }
+}
TEST(RunFloat64Mul) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
@@ -3808,67 +3748,55 @@ TEST(RunFloat64AddP) {
}
}
-
-TEST(RunFloa32MaxP) {
+TEST(RunFloat64MaxP) {
RawMachineAssemblerTester<int32_t> m;
- Float32BinopTester bt(&m);
- if (!m.machine()->Float32Max().IsSupported()) return;
-
- bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
+ Float64BinopTester bt(&m);
+ bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
- FOR_FLOAT32_INPUTS(pl) {
- FOR_FLOAT32_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
+ FOR_FLOAT64_INPUTS(pl) {
+ FOR_FLOAT64_INPUTS(pr) {
+ CHECK_DOUBLE_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-TEST(RunFloat64MaxP) {
+TEST(RunFloat64MinP) {
RawMachineAssemblerTester<int32_t> m;
Float64BinopTester bt(&m);
- if (!m.machine()->Float64Max().IsSupported()) return;
-
- bt.AddReturn(m.Float64Max(bt.param0, bt.param1));
+ bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
FOR_FLOAT64_INPUTS(pl) {
FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl > *pr ? *pl : *pr, bt.call(*pl, *pr));
+ CHECK_DOUBLE_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
-TEST(RunFloat32MinP) {
+TEST(RunFloat32Max) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
- if (!m.machine()->Float32Min().IsSupported()) return;
-
- bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
+ bt.AddReturn(m.Float32Max(bt.param0, bt.param1));
FOR_FLOAT32_INPUTS(pl) {
FOR_FLOAT32_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
+ CHECK_FLOAT_EQ(JSMax(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
-TEST(RunFloat64MinP) {
+TEST(RunFloat32Min) {
RawMachineAssemblerTester<int32_t> m;
- Float64BinopTester bt(&m);
- if (!m.machine()->Float64Min().IsSupported()) return;
-
- bt.AddReturn(m.Float64Min(bt.param0, bt.param1));
+ Float32BinopTester bt(&m);
+ bt.AddReturn(m.Float32Min(bt.param0, bt.param1));
- FOR_FLOAT64_INPUTS(pl) {
- FOR_FLOAT64_INPUTS(pr) {
- CHECK_DOUBLE_EQ(*pl < *pr ? *pl : *pr, bt.call(*pl, *pr));
+ FOR_FLOAT32_INPUTS(pl) {
+ FOR_FLOAT32_INPUTS(pr) {
+ CHECK_FLOAT_EQ(JSMin(*pl, *pr), bt.call(*pl, *pr));
}
}
}
-
TEST(RunFloat32SubP) {
RawMachineAssemblerTester<int32_t> m;
Float32BinopTester bt(&m);
@@ -4110,9 +4038,15 @@ TEST(RunChangeUint32ToFloat64) {
TEST(RunTruncateFloat32ToInt32) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Float32());
m.Return(m.TruncateFloat32ToInt32(m.Parameter(0)));
+ // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
+ // number above INT32_MAX which cannot be represented as int32.
+ float upper_bound = 2147483648.0f;
+ // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
+ // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
+ // is.
+ float lower_bound = static_cast<float>(INT32_MIN);
FOR_FLOAT32_INPUTS(i) {
- if (*i <= static_cast<float>(std::numeric_limits<int32_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<int32_t>::min())) {
+ if (*i < upper_bound && *i >= lower_bound) {
CHECK_FLOAT_EQ(static_cast<int32_t>(*i), m.Call(*i));
}
}
@@ -4122,23 +4056,20 @@ TEST(RunTruncateFloat32ToInt32) {
TEST(RunTruncateFloat32ToUint32) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float32());
m.Return(m.TruncateFloat32ToUint32(m.Parameter(0)));
- {
- FOR_UINT32_INPUTS(i) {
- float input = static_cast<float>(*i);
- // This condition on 'input' is required because
- // static_cast<float>(std::numeric_limits<uint32_t>::max()) results in a
- // value outside uint32 range.
- if (input < static_cast<float>(std::numeric_limits<uint32_t>::max())) {
- CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
- }
+ // The upper bound is (UINT32_MAX + 1), which is the lowest
+ // float-representable number above UINT32_MAX which cannot be represented as
+ // uint32.
+ double upper_bound = 4294967296.0f;
+ double lower_bound = -1.0f;
+ FOR_UINT32_INPUTS(i) {
+ volatile float input = static_cast<float>(*i);
+ if (input < upper_bound) {
+ CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
}
}
- {
- FOR_FLOAT32_INPUTS(i) {
- if (*i <= static_cast<float>(std::numeric_limits<uint32_t>::max()) &&
- *i >= static_cast<float>(std::numeric_limits<uint32_t>::min())) {
- CHECK_FLOAT_EQ(static_cast<uint32_t>(*i), m.Call(*i));
- }
+ FOR_FLOAT32_INPUTS(j) {
+ if ((*j < upper_bound) && (*j > lower_bound)) {
+ CHECK_FLOAT_EQ(static_cast<uint32_t>(*j), m.Call(*j));
}
}
}
@@ -4201,7 +4132,7 @@ uint64_t ToInt64(uint32_t low, uint32_t high) {
return (static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(low);
}
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X87
TEST(RunInt32PairAdd) {
BufferedRawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
@@ -4449,6 +4380,56 @@ TEST(RunWord32PairShlWithSharedInput) {
TestWord32PairShlWithSharedInput(1, 1);
}
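+// Feeds a 64-bit input as a (low, high) word pair, shifts it right by 0..63
+// via Word32PairShr, and checks the recombined (low, high) result against a
+// plain 64-bit shift of the original value.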
+TEST(RunWord32PairShr) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairShr =
+ m.Word32PairShr(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairShr));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairShr));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
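+// Same as RunWord32PairShr above, but with an arithmetic right shift over
+// signed 64-bit inputs, so the sign bit of the high word is replicated.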
+TEST(RunWord32PairSar) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+ Node* PairSar =
+ m.Word32PairSar(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, PairSar));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, PairSar));
+ m.Return(m.Int32Constant(74));
+
+ FOR_INT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
#endif
TEST(RunDeadChangeFloat64ToInt32) {
@@ -4968,45 +4949,6 @@ TEST(RunFloat64LessThan) {
}
-template <typename IntType>
-static void LoadStoreTruncation(MachineType kRepresentation) {
- IntType input;
-
- RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input, kRepresentation);
- Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
- m.StoreToPointer(&input, kRepresentation.representation(), ap1);
- m.Return(ap1);
-
- const IntType max = std::numeric_limits<IntType>::max();
- const IntType min = std::numeric_limits<IntType>::min();
-
- // Test upper bound.
- input = max;
- CHECK_EQ(max + 1, m.Call());
- CHECK_EQ(min, input);
-
- // Test lower bound.
- input = min;
- CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
- CHECK_EQ(min + 1, input);
-
- // Test all one byte values that are not one byte bounds.
- for (int i = -127; i < 127; i++) {
- input = i;
- int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
- CHECK_EQ(static_cast<IntType>(expected), m.Call());
- CHECK_EQ(static_cast<IntType>(i + 1), input);
- }
-}
-
-
-TEST(RunLoadStoreTruncation) {
- LoadStoreTruncation<int8_t>(MachineType::Int8());
- LoadStoreTruncation<int16_t>(MachineType::Int16());
-}
-
-
static void IntPtrCompare(intptr_t left, intptr_t right) {
for (int test = 0; test < 7; test++) {
RawMachineAssemblerTester<bool> m(MachineType::Pointer(),
@@ -5352,6 +5294,98 @@ TEST(RunInt32SubWithOverflowInBranchP) {
}
}
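+// The Int32MulWithOverflow tests below compare the overflow projection against
+// base::bits::SignedMulOverflow32 and check the value projection only when no
+// overflow is reported.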
+TEST(RunInt32MulWithOverflowP) {
+ int32_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* mul = m.Int32MulWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ bt.AddReturn(ovf);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected_val;
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+}
+
+TEST(RunInt32MulWithOverflowImm) {
+ int32_t actual_val = -1, expected_val = 0;
+ FOR_INT32_INPUTS(i) {
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* mul = m.Int32MulWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* mul = m.Int32MulWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* mul =
+ m.Int32MulWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ Node* val = m.Projection(0, mul);
+ Node* ovf = m.Projection(1, mul);
+ m.StoreToPointer(&actual_val, MachineRepresentation::kWord32, val);
+ m.Return(ovf);
+ int expected_ovf = bits::SignedMulOverflow32(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ if (!expected_ovf) {
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ }
+}
+
+TEST(RunInt32MulWithOverflowInBranchP) {
+ int constant = 911777;
+ RawMachineLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* mul = m.Int32MulWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, mul);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Int32Constant(constant));
+ m.Bind(&blockb);
+ Node* val = m.Projection(0, mul);
+ bt.AddReturn(val);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected;
+ if (bits::SignedMulOverflow32(*i, *j, &expected)) expected = constant;
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
TEST(RunWord64EqualInBranchP) {
int64_t input;
@@ -5417,8 +5451,7 @@ TEST(RunTruncateInt64ToInt32P) {
}
}
-
-TEST(RunTruncateFloat64ToInt32P) {
+TEST(RunTruncateFloat64ToWord32P) {
struct {
double from;
double raw;
@@ -5479,8 +5512,7 @@ TEST(RunTruncateFloat64ToInt32P) {
{-1.7976931348623157e+308, 0}};
double input = -1.0;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.TruncateFloat64ToInt32(
- TruncationMode::kJavaScript,
+ m.Return(m.TruncateFloat64ToWord32(
m.LoadFromPointer(&input, MachineType::Float64())));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i].from;
@@ -5489,6 +5521,12 @@ TEST(RunTruncateFloat64ToInt32P) {
}
}
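+// Checks that the word32 result of truncating -1.0 behaves as the
+// sign-extended value -1 in a subsequent Int32Sub (the test name suggests this
+// guards against an erroneous zero-extension on 64-bit targets).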
+TEST(RunTruncateFloat64ToWord32SignExtension) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.TruncateFloat64ToWord32(r.Float64Constant(-1.0)),
+ r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunChangeFloat32ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float32());
@@ -5573,6 +5611,204 @@ TEST(RunFloat64Abs) {
FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(std::abs(*i), m.Call(*i)); }
}
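+// The Float64 math-operator tests below use the implementations in
+// src/base/ieee754.h (included at the top of this change) as the reference;
+// several also check the NaN, infinity and signed-zero special cases
+// explicitly.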
+TEST(RunFloat64Acos) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Acos(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::acos(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Acosh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Acosh(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::acosh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Asin) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Asin(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::asin(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Asinh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Asinh(m.Parameter(0)));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::asinh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atan) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Atan(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::atan(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atanh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Atanh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(), m.Call(1.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-1.0));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::atanh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Atan2) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(m.Float64Atan2(m.Parameter(0), m.Parameter(1)));
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_FLOAT64_INPUTS(j) {
+ CHECK_DOUBLE_EQ(ieee754::atan2(*i, *j), m.Call(*i, *j));
+ }
+ }
+}
+
+TEST(RunFloat64Cos) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cos(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cos(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Cosh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cosh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cosh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Exp) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Exp(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_EQ(0.0, m.Call(-std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(1.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(1.0, m.Call(0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::exp(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Expm1) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Expm1(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_EQ(-1.0, m.Call(-std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::expm1(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log1p) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log1p(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-1.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(0.0));
+ CHECK_DOUBLE_EQ(-0.0, m.Call(-0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log1p(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log2) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log2(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(0.0, m.Call(1.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log2(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Log10) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Log10(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK(std::isnan(m.Call(-std::numeric_limits<double>::infinity())));
+ CHECK(std::isnan(m.Call(-1.0)));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(-0.0));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(), m.Call(0.0));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::log10(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Cbrt) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Cbrt(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ CHECK_DOUBLE_EQ(std::numeric_limits<double>::infinity(),
+ m.Call(std::numeric_limits<double>::infinity()));
+ CHECK_DOUBLE_EQ(-std::numeric_limits<double>::infinity(),
+ m.Call(-std::numeric_limits<double>::infinity()));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::cbrt(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Sin) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Sin(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::sin(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Sinh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Sinh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::sinh(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Tan) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Tan(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::tan(*i), m.Call(*i)); }
+}
+
+TEST(RunFloat64Tanh) {
+ BufferedRawMachineAssemblerTester<double> m(MachineType::Float64());
+ m.Return(m.Float64Tanh(m.Parameter(0)));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::quiet_NaN())));
+ CHECK(std::isnan(m.Call(std::numeric_limits<double>::signaling_NaN())));
+ FOR_FLOAT64_INPUTS(i) { CHECK_DOUBLE_EQ(ieee754::tanh(*i), m.Call(*i)); }
+}
static double two_30 = 1 << 30; // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22); // 2^52 is a precision boundary.
@@ -5854,50 +6090,6 @@ TEST(RunCallCFunction8) {
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
-TEST(RunCheckedLoadInt64) {
- int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
- RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
- index, length);
- m.Return(load);
-
- CHECK_EQ(buffer[0], m.Call(0));
- CHECK_EQ(buffer[1], m.Call(8));
- CHECK_EQ(0, m.Call(16));
-}
-
-
-TEST(RunCheckedStoreInt64) {
- const int64_t write = 0x5566778899aabbLL;
- const int64_t before = 0x33bbccddeeff0011LL;
- int64_t buffer[] = {before, before};
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* value = m.Int64Constant(write);
- Node* store =
- m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
- index, length, value);
- USE(store);
- m.Return(m.Int32Constant(11));
-
- CHECK_EQ(11, m.Call(16));
- CHECK_EQ(before, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(0));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(8));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(write, buffer[1]);
-}
-
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
@@ -6348,7 +6540,6 @@ TEST(RunComputedCodeObject) {
CallDescriptor::kCallCodeObject, // kind
MachineType::AnyTagged(), // target_type
c->GetInputLocation(0), // target_loc
- &sig, // machine_sig
&loc, // location_sig
0, // stack count
Operator::kNoProperties, // properties