summary | refs | log | tree | commit | diff
path: root/deps/v8/test/cctest/test-assembler-riscv64.cc
diff options
context:
space:
mode:
Diffstat (limited to 'deps/v8/test/cctest/test-assembler-riscv64.cc')
-rw-r--r-- | deps/v8/test/cctest/test-assembler-riscv64.cc | 123
1 file changed, 103 insertions, 20 deletions
diff --git a/deps/v8/test/cctest/test-assembler-riscv64.cc b/deps/v8/test/cctest/test-assembler-riscv64.cc
index 898929b16d..c5a937d0e6 100644
--- a/deps/v8/test/cctest/test-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-assembler-riscv64.cc
@@ -381,10 +381,10 @@ UTEST_R2_FORM_WITH_OP(sra, int64_t, -0x1234'5678'0000'0000LL, 33, >>)
// -- CSR --
UTEST_CSRI(csr_frm, DYN, RUP)
UTEST_CSRI(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSRI(csr_fcsr, kDivideByZero | kOverflow, kUnderflow)
+UTEST_CSRI(csr_fcsr, kDivideByZero | kFPUOverflow, kUnderflow)
UTEST_CSR(csr_frm, DYN, RUP)
UTEST_CSR(csr_fflags, kInexact | kInvalidOperation, kInvalidOperation)
-UTEST_CSR(csr_fcsr, kDivideByZero | kOverflow | (RDN << kFcsrFrmShift),
+UTEST_CSR(csr_fcsr, kDivideByZero | kFPUOverflow | (RDN << kFcsrFrmShift),
kUnderflow | (RNE << kFcsrFrmShift))
// -- RV64I --
@@ -598,6 +598,40 @@ TEST(RISCV0) {
}
}
+// Verify that RecursiveLi materializes arbitrary 64-bit immediates
+// correctly: first a broad sweep of interesting values, then every
+// single-bit pattern.
+TEST(RISCVLi) {
+  CcTest::InitializeVM();
+
+  FOR_INT64_INPUTS(i) {
+    auto fn = [i](MacroAssembler& assm) { __ RecursiveLi(a0, i); };
+    auto res = GenAndRunTest(fn);
+    CHECK_EQ(i, res);
+  }
+  // Use a 64-bit shift: `1 << i` shifts a 32-bit int literal, which is
+  // undefined behavior for i >= 31 and cannot produce the upper-bit
+  // patterns this loop is meant to cover.
+  for (int i = 0; i < 64; i++) {
+    auto fn = [i](MacroAssembler& assm) {
+      __ RecursiveLi(a0, int64_t{1} << i);
+    };
+    auto res = GenAndRunTest(fn);
+    CHECK_EQ(int64_t{1} << i, res);
+  }
+}
+
+// Compare instruction counts of V8's RV_li pseudo-expansion against the
+// LLVM-style RecursiveLi expansion for the same immediate: RV_li must
+// never emit more instructions than RecursiveLi.
+TEST(RISCVLiEstimate) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  FOR_INT64_INPUTS(i) {
+    HandleScope scope(isolate);
+    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+    Label a, b;
+    // Count instructions emitted by V8's own li expansion.
+    assm.bind(&a);
+    assm.RecordComment("V8 RV_li");
+    assm.RV_li(a0, i);
+    int count_a = assm.InstructionsGeneratedSince(&a);
+    // Count instructions emitted by the recursive (LLVM-derived) expansion
+    // for the same value.
+    assm.bind(&b);
+    assm.RecordComment("LLVM li");
+    assm.RecursiveLi(a0, i);
+    int count_b = assm.InstructionsGeneratedSince(&b);
+    CHECK_LE(count_a, count_b);
+  }
+}
+
TEST(RISCV1) {
CcTest::InitializeVM();
@@ -825,7 +859,7 @@ TEST(RISCV3) {
__ fsqrt_s(ft5, ft4);
__ fsw(ft5, a0, offsetof(T, fg));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
// Double test values.
t.a = 1.5e14;
@@ -898,7 +932,7 @@ TEST(RISCV4) {
__ sd(a4, a0, offsetof(T, e));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e22;
t.b = 2.75e11;
@@ -949,7 +983,7 @@ TEST(RISCV5) {
__ fcvt_d_l(fa1, a5);
__ fsd(fa1, a0, offsetof(T, b));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e4;
t.b = 2.75e8;
@@ -1007,7 +1041,7 @@ TEST(RISCV6) {
__ lhu(t1, a0, offsetof(T, si));
__ sh(t1, a0, offsetof(T, r6));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.ui = 0x11223344;
t.si = 0x99AABBCC;
@@ -1123,7 +1157,7 @@ TEST(RISCV7) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1180,6 +1214,28 @@ TEST(NAN_BOX) {
CHECK_EQ((uint64_t)base::bit_cast<uint32_t>(1234.56f), res);
}
+ // Test NaN boxing in FMV.S
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ fmv_w_x(fa0, a0);
+ __ fmv_s(ft1, fa0);
+ __ fmv_s(fa0, ft1);
+ };
+ auto res = GenAndRunTest<uint32_t>(0x7f400000, fn);
+ CHECK_EQ((uint32_t)base::bit_cast<uint32_t>(0x7f400000), res);
+ }
+
+ // Test NaN boxing in FMV.D
+ {
+ auto fn = [](MacroAssembler& assm) {
+ __ fmv_d_x(fa0, a0);
+ __ fmv_d(ft1, fa0);
+ __ fmv_d(fa0, ft1);
+ };
+ auto res = GenAndRunTest<uint64_t>(0x7ff4000000000000, fn);
+ CHECK_EQ((uint64_t)base::bit_cast<uint64_t>(0x7ff4000000000000), res);
+ }
+
// Test FLW and FSW
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1198,7 +1254,7 @@ TEST(NAN_BOX) {
// Check only transfer low 32bits when fsw
__ fsw(fa0, a0, offsetof(T, res));
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = -123.45;
t.box = 0;
@@ -1417,7 +1473,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ fadd_d(fa2, fa1, fa0);
__ c_fsd(fa2, a0, offsetof(T, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 1.5e14;
@@ -1442,7 +1498,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ add(a3, a1, a2);
__ c_sw(a3, a0, offsetof(S, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
s.a = 1;
s.b = 2;
@@ -1466,7 +1522,7 @@ TEST(RVC_LOAD_STORE_COMPRESSED) {
__ add(a3, a1, a2);
__ c_sd(a3, a0, offsetof(U, c)); // c = a + b.
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
u.a = 1;
u.b = 2;
@@ -1582,7 +1638,7 @@ TEST(RVC_CB_BRANCH) {
__ bind(&outa_here);
};
- auto f = AssembleCode<F3>(fn);
+ auto f = AssembleCode<F3>(isolate, fn);
t.a = 1.5e14;
t.b = 2.75e11;
@@ -1833,7 +1889,7 @@ TEST(jump_tables1) {
CHECK_EQ(0, assm.UnboundLabelsCount());
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
@@ -1883,7 +1939,7 @@ TEST(jump_tables2) {
__ Ld(ra, MemOperand(sp));
__ addi(sp, sp, 8);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
@@ -1943,7 +1999,7 @@ TEST(jump_tables3) {
__ Ld(ra, MemOperand(sp));
__ addi(sp, sp, 8);
};
- auto f = AssembleCode<F1>(fn);
+ auto f = AssembleCode<F1>(isolate, fn);
for (int i = 0; i < kNumCases; ++i) {
Handle<Object> result(
@@ -1961,7 +2017,7 @@ TEST(li_estimate) {
std::vector<int64_t> immediates = {
-256, -255, 0, 255, 8192, 0x7FFFFFFF,
INT32_MIN, INT32_MAX / 2, INT32_MAX, UINT32_MAX, INT64_MAX, INT64_MAX / 2,
- INT64_MIN};
+ INT64_MIN, 12312874234};
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -1970,7 +2026,7 @@ TEST(li_estimate) {
Label a;
assm.bind(&a);
assm.RV_li(t0, p);
- int expected_count = assm.li_estimate(p, true);
+ int expected_count = assm.RV_li_count(p, true);
int count = assm.InstructionsGeneratedSince(&a);
CHECK_EQ(count, expected_count);
}
@@ -2254,11 +2310,14 @@ UTEST_RVV_VI_VX_FORM_WITH_FN(vminu_vx, 32, ARRAY_INT32, std::min<uint32_t>)
#define UTEST_RVV_VF_VV_FORM_WITH_OP(instr_name, tested_op) \
UTEST_RVV_VF_VV_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
-#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, tested_op) \
- UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, ((rs1_fval)tested_op(rs2_fval)))
+#define UTEST_RVV_VF_VF_FORM_WITH_OP(instr_name, array, tested_op) \
+ UTEST_RVV_VF_VF_FORM_WITH_RES(instr_name, array, \
+ ((rs1_fval)tested_op(rs2_fval)))
+
+#define ARRAY_FLOAT compiler::ValueHelper::GetVector<float>()
UTEST_RVV_VF_VV_FORM_WITH_OP(vfadd_vv, +)
-// UTEST_RVV_VF_VF_FORM_WITH_OP(vfadd_vf, ARRAY_FLOAT, +)
+UTEST_RVV_VF_VF_FORM_WITH_OP(vfadd_vf, ARRAY_FLOAT, +)
UTEST_RVV_VF_VV_FORM_WITH_OP(vfsub_vv, -)
// UTEST_RVV_VF_VF_FORM_WITH_OP(vfsub_vf, ARRAY_FLOAT, -)
UTEST_RVV_VF_VV_FORM_WITH_OP(vfmul_vv, *)
@@ -2832,6 +2891,30 @@ UTEST_VCPOP_M_WITH_WIDTH(32)
UTEST_VCPOP_M_WITH_WIDTH(16)
UTEST_VCPOP_M_WITH_WIDTH(8)
+// Verify WasmRvvS128const loads a 128-bit constant into a vector register:
+// generated code stores v10 out through a raw pointer, and the stored
+// lanes are compared against the source 64-bit halves.
+TEST(RISCV_UTEST_WasmRvvS128const) {
+  if (!CpuFeatures::IsSupported(RISCV_SIMD)) return;
+  CcTest::InitializeVM();
+  for (uint64_t x : compiler::ValueHelper::GetVector<int64_t>()) {
+    for (uint64_t y : compiler::ValueHelper::GetVector<int64_t>()) {
+      uint64_t src[2] = {x, y};
+      // The constant is passed to WasmRvvS128const as 16 raw bytes.
+      uint8_t vals[16];
+      // volatile: written by the generated code behind the compiler's
+      // back, then read by CHECK_EQ below.
+      volatile uint64_t result[kRvvVLEN / 64] = {0};
+      memcpy(vals, src, sizeof(vals));
+      auto fn = [vals, &result](MacroAssembler& assm) {
+        __ Push(kScratchReg);
+        __ WasmRvvS128const(v10, vals);
+        // Store v10 to `result` with SEW=64, LMUL=1.
+        __ li(t1, Operand(int64_t(result)));
+        __ VU.set(t0, VSew::E64, Vlmul::m1);
+        __ vs(v10, t1, 0, VSew::E64);
+        __ Pop(kScratchReg);
+      };
+      GenAndRunTest(fn);
+      CHECK_EQ(result[0], x);
+      CHECK_EQ(result[1], y);
+    }
+  }
+}
+
#undef UTEST_VCPOP_M_WITH_WIDTH
#undef __