author     Michaël Zasso <targos@protonmail.com>    2016-09-06 22:49:51 +0200
committer  Michaël Zasso <targos@protonmail.com>    2016-09-22 09:51:19 +0200
commit     ec02b811a8a5c999bab4de312be2d732b7d9d50b (patch)
tree       ca3068017254f238cf413a451c57a803572983a4 /deps/v8/test/cctest/test-macro-assembler-mips.cc
parent     d2eb7ce0105369a9cad82787cb33a665e9bd00ad (diff)
download   node-new-ec02b811a8a5c999bab4de312be2d732b7d9d50b.tar.gz
deps: update V8 to 5.4.500.27
Pick up latest commit from the 5.4-lkgr branch.

deps: edit V8 gitignore to allow trace event copy
deps: update V8 trace event to 315bf1e2d45be7d53346c31cfcc37424a32c30c8
deps: edit V8 gitignore to allow gtest_prod.h copy
deps: update V8 gtest to 6f8a66431cb592dad629028a50b3dd418a408c87

PR-URL: https://github.com/nodejs/node/pull/8317
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Diffstat (limited to 'deps/v8/test/cctest/test-macro-assembler-mips.cc')
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-mips.cc  745
1 file changed, 737 insertions, 8 deletions
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 91ee215315..057c370304 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -60,6 +60,71 @@ static bool all_zeroes(const byte* beg, const byte* end) {
return true;
}
+TEST(BYTESWAP) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ struct T {
+ int32_t r1;
+ int32_t r2;
+ int32_t r3;
+ int32_t r4;
+ int32_t r5;
+ };
+ T t;
+
+ MacroAssembler assembler(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r1)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 4);
+ __ sw(a2, MemOperand(a0, offsetof(T, r1)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r2)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 2);
+ __ sw(a2, MemOperand(a0, offsetof(T, r2)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r3)));
+ __ nop();
+ __ ByteSwapSigned(a2, a2, 1);
+ __ sw(a2, MemOperand(a0, offsetof(T, r3)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r4)));
+ __ nop();
+ __ ByteSwapUnsigned(a2, a2, 1);
+ __ sw(a2, MemOperand(a0, offsetof(T, r4)));
+
+ __ lw(a2, MemOperand(a0, offsetof(T, r5)));
+ __ nop();
+ __ ByteSwapUnsigned(a2, a2, 2);
+ __ sw(a2, MemOperand(a0, offsetof(T, r5)));
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ masm->GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ ::F3 f = FUNCTION_CAST<::F3>(code->entry());
+ t.r1 = 0x781A15C3;
+ t.r2 = 0x2CDE;
+ t.r3 = 0x9F;
+ t.r4 = 0x9F;
+ t.r5 = 0x2CDE;
+ Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(static_cast<int32_t>(0xC3151A78), t.r1);
+ CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0x9FFFFFFF), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x9F000000), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xDE2C0000), t.r5);
+}
TEST(CopyBytes) {
CcTest::InitializeVM();
@@ -390,14 +455,14 @@ TEST(Lsa) {
}
}
-static const std::vector<uint32_t> uint32_test_values() {
+static const std::vector<uint32_t> cvt_trunc_uint32_test_values() {
static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00ffff00,
0x7fffffff, 0x80000000, 0x80000001,
0x80ffff00, 0x8fffffff, 0xffffffff};
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
-static const std::vector<int32_t> int32_test_values() {
+static const std::vector<int32_t> cvt_trunc_int32_test_values() {
static const int32_t kValues[] = {
static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
static_cast<int32_t>(0x00ffff00), static_cast<int32_t>(0x7fffffff),
@@ -408,13 +473,31 @@ static const std::vector<int32_t> int32_test_values() {
}
// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
-#define FOR_INPUTS(ctype, itype, var) \
- std::vector<ctype> var##_vec = itype##_test_values(); \
+#define FOR_INPUTS(ctype, itype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
for (std::vector<ctype>::iterator var = var##_vec.begin(); \
var != var##_vec.end(); ++var)
-#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
-#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var != var##_vec.end(); ++var, ++var2)
+
+#define FOR_ENUM_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(enum type, type, var, test_vector)
+#define FOR_STRUCT_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(struct type, type, var, test_vector)
+#define FOR_UINT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint32_t, uint32, var, test_vector)
+#define FOR_INT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(int32_t, int32, var, test_vector)
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
+
+#define FOR_UINT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint64_t, uint32, var, test_vector)
template <typename RET_TYPE, typename IN_TYPE, typename Func>
RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
@@ -445,7 +528,7 @@ RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) {
TEST(cvt_s_w_Trunc_uw_s) {
CcTest::InitializeVM();
- FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) {
uint32_t input = *i;
CHECK_EQ(static_cast<float>(input),
run_Cvt<uint32_t>(input, [](MacroAssembler* masm) {
@@ -457,7 +540,7 @@ TEST(cvt_s_w_Trunc_uw_s) {
TEST(cvt_d_w_Trunc_w_d) {
CcTest::InitializeVM();
- FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) {
int32_t input = *i;
CHECK_EQ(static_cast<double>(input),
run_Cvt<int32_t>(input, [](MacroAssembler* masm) {
@@ -467,6 +550,415 @@ TEST(cvt_d_w_Trunc_w_d) {
}
}
+static const std::vector<int32_t> overflow_int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0xf0000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0xff000000), static_cast<int32_t>(0x0000f000),
+ static_cast<int32_t>(0x0f000000), static_cast<int32_t>(0x991234ab),
+ static_cast<int32_t>(0xb0ffff01), static_cast<int32_t>(0x00006fff),
+ static_cast<int32_t>(0xffffffff)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+enum OverflowBranchType {
+ kAddBranchOverflow,
+ kSubBranchOverflow,
+};
+
+struct OverflowRegisterCombination {
+ Register dst;
+ Register left;
+ Register right;
+ Register scratch;
+};
+
+static const std::vector<enum OverflowBranchType> overflow_branch_type() {
+ static const enum OverflowBranchType kValues[] = {kAddBranchOverflow,
+ kSubBranchOverflow};
+ return std::vector<enum OverflowBranchType>(&kValues[0],
+ &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<struct OverflowRegisterCombination>
+overflow_register_combination() {
+ static const struct OverflowRegisterCombination kValues[] = {
+ {t0, t1, t2, t3}, {t0, t0, t2, t3}, {t0, t1, t0, t3}, {t0, t1, t1, t3}};
+ return std::vector<struct OverflowRegisterCombination>(
+ &kValues[0], &kValues[arraysize(kValues)]);
+}
+
+template <typename T>
+static bool IsAddOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (x > 0 && y > (max - x)) || (x < 0 && y < (min - x));
+}
+
+template <typename T>
+static bool IsSubOverflow(T x, T y) {
+ DCHECK(std::numeric_limits<T>::is_integer);
+ T max = std::numeric_limits<T>::max();
+ T min = std::numeric_limits<T>::min();
+
+ return (y > 0 && x < (min + y)) || (y < 0 && x > (max + y));
+}
+
+template <typename IN_TYPE, typename Func>
+static bool runOverflow(IN_TYPE valLeft, IN_TYPE valRight,
+ Func GenerateOverflowInstructions) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateOverflowInstructions(masm, valLeft, valRight);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ int32_t r =
+ reinterpret_cast<int32_t>(CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ DCHECK(r == 0 || r == 1);
+ return r;
+}
+
+TEST(BranchOverflowInt32BothLabelsTrampoline) {
+ if (!IsMipsArchVariant(kMips32r6)) return;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+ // If the left and right registers are the same, the left and right
+ // test values must also be the same; otherwise we skip the test
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+
+ Label done;
+ size_t nr_calls =
+ kMaxBranchOffset / (2 * Instruction::kInstrSize) + 2;
+ for (size_t i = 0; i < nr_calls; ++i) {
+ __ BranchShort(&done, eq, a0, Operand(a1));
+ }
+ __ bind(&done);
+
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32BothLabels) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+ // If the left and right registers are the same, the left and right
+ // test values must also be the same; otherwise we skip the test
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 2);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32LeftLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+ // If the left and right registers are the same, the left and right
+ // test values must also be the same; otherwise we skip the test
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, &overflow, NULL,
+ rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight),
+ &overflow, NULL, rc.scratch);
+ break;
+ }
+ __ li(v0, 0);
+ __ Branch(&end);
+ __ bind(&overflow);
+ __ li(v0, 1);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
+TEST(BranchOverflowInt32RightLabel) {
+ FOR_INT32_INPUTS(i, overflow_int32_test_values) {
+ FOR_INT32_INPUTS(j, overflow_int32_test_values) {
+ FOR_ENUM_INPUTS(br, OverflowBranchType, overflow_branch_type) {
+ FOR_STRUCT_INPUTS(regComb, OverflowRegisterCombination,
+ overflow_register_combination) {
+ int32_t ii = *i;
+ int32_t jj = *j;
+ enum OverflowBranchType branchType = *br;
+ struct OverflowRegisterCombination rc = *regComb;
+
+ // If the left and right registers are the same, the left and right
+ // test values must also be the same; otherwise we skip the test
+ if (rc.left.code() == rc.right.code()) {
+ if (ii != jj) {
+ continue;
+ }
+ }
+
+ bool res1 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ __ li(rc.right, valRight);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, rc.right, NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ bool res2 = runOverflow<int32_t>(
+ ii, jj, [branchType, rc](MacroAssembler* masm, int32_t valLeft,
+ int32_t valRight) {
+ Label no_overflow, end;
+ __ li(rc.left, valLeft);
+ switch (branchType) {
+ case kAddBranchOverflow:
+ __ AddBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ case kSubBranchOverflow:
+ __ SubBranchOvf(rc.dst, rc.left, Operand(valRight), NULL,
+ &no_overflow, rc.scratch);
+ break;
+ }
+ __ li(v0, 1);
+ __ Branch(&end);
+ __ bind(&no_overflow);
+ __ li(v0, 0);
+ __ bind(&end);
+ });
+
+ switch (branchType) {
+ case kAddBranchOverflow:
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsAddOverflow<int32_t>(ii, jj), res2);
+ break;
+ case kSubBranchOverflow:
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res1);
+ CHECK_EQ(IsSubOverflow<int32_t>(ii, jj), res2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+ }
+ }
+}
+
TEST(min_max_nan) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -577,4 +1069,241 @@ TEST(min_max_nan) {
}
}
+template <typename IN_TYPE, typename Func>
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
+ typedef int32_t (*F_CVT)(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, nullptr, 0,
+ v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+ IN_TYPE res;
+
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F_CVT f = FUNCTION_CAST<F_CVT>(code->entry());
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
+ CALL_GENERATED_CODE(isolate, f, memory_buffer, 0, 0, 0, 0);
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
+
+ return res == value;
+}
+
+static const std::vector<uint64_t> unsigned_test_values() {
+ static const uint64_t kValues[] = {
+ 0x2180f18a06384414, 0x000a714532102277, 0xbc1acccf180649f0,
+ 0x8000000080008000, 0x0000000000000001, 0xffffffffffffffff,
+ };
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset() {
+ static const int32_t kValues[] = {// value, offset
+ -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(Ulh) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulh(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulh(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), v0);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulhu(a0, MemOperand(a0, in_offset));
+ __ Ush(a0, MemOperand(t0, out_offset), t1);
+ }));
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulhu(v0, MemOperand(a0, in_offset));
+ __ Ush(v0, MemOperand(a0, out_offset), t1);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulh_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint16_t value = static_cast<uint64_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint16_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ulhu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+ // If the signed and unsigned values are the same, check
+ // the upper bits to see if they are zero
+ __ sra(t0, t0, 15);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+ // If signed and unsigned values are different,
+ // check that the upper bits are complementary
+ __ bind(&different);
+ __ sra(t1, t1, 15);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ sra(t0, t0, 15);
+ __ addiu(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ulh(t0, MemOperand(a0, in_offset));
+ __ Ush(t0, MemOperand(a0, out_offset), v0);
+ __ Branch(&end);
+ __ bind(&fail);
+ __ Ush(zero_reg, MemOperand(a0, out_offset), v0);
+ __ bind(&end);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulw) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulw(v0, MemOperand(a0, in_offset));
+ __ Usw(v0, MemOperand(a0, out_offset));
+ }));
+ CHECK_EQ(true,
+ run_Unaligned<uint32_t>(
+ buffer_middle, in_offset, out_offset, (uint32_t)value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ulw(a0, MemOperand(a0, in_offset));
+ __ Usw(a0, MemOperand(t0, out_offset));
+ }));
+ }
+ }
+ }
+}
+
+TEST(Ulwc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ float value = static_cast<float>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<float>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ulwc1(f0, MemOperand(a0, in_offset), t0);
+ __ Uswc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
+TEST(Uldc1) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ double value = static_cast<double>(*i);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ CHECK_EQ(true, run_Unaligned<double>(
+ buffer_middle, in_offset, out_offset, value,
+ [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Uldc1(f0, MemOperand(a0, in_offset), t0);
+ __ Usdc1(f0, MemOperand(a0, out_offset), t0);
+ }));
+ }
+ }
+ }
+}
+
#undef __