Diffstat (limited to 'deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h')
-rw-r--r--  deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h  262
1 file changed, 160 insertions(+), 102 deletions(-)
diff --git a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index 903cb07ef8..52a024cd72 100644
--- a/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/deps/v8/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -23,31 +23,6 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
-inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
- switch (liftoff_cond) {
- case kEqual:
- return eq;
- case kUnequal:
- return ne;
- case kSignedLessThan:
- return lt;
- case kSignedLessEqual:
- return le;
- case kSignedGreaterThan:
- return gt;
- case kSignedGreaterEqual:
- return ge;
- case kUnsignedLessThan:
- return ult;
- case kUnsignedLessEqual:
- return ule;
- case kUnsignedGreaterThan:
- return ugt;
- case kUnsignedGreaterEqual:
- return uge;
- }
-}
-
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
int offset = pc_offset();
@@ -86,19 +61,20 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
void LiftoffAssembler::AlignFrameSize() {}
void LiftoffAssembler::PatchPrepareStackFrame(
- int offset, SafepointTableBuilder* safepoint_table_builder) {
+ int offset, SafepointTableBuilder* safepoint_table_builder,
+ bool feedback_vector_slot) {
// The frame_size includes the frame marker and the instance slot. Both are
// pushed as part of frame construction, so we don't need to allocate memory
// for them anymore.
int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
// The frame setup builtin also pushes the feedback vector.
- if (v8_flags.wasm_speculative_inlining) {
+ if (feedback_vector_slot) {
frame_size -= kSystemPointerSize;
}
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 256;
- TurboAssembler patching_assembler(
+ MacroAssembler patching_assembler(
nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
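For reference, a worked example of the size computation above, with hypothetical numbers (RV64, so kSystemPointerSize = 8; the 48-byte total is made up for illustration):

    // Hypothetical: GetTotalFrameSize() == 48 on RV64.
    int frame_size = 48 - 2 * 8;  // frame marker + instance slot already pushed -> 32
    bool feedback_vector_slot = true;
    if (feedback_vector_slot) frame_size -= 8;  // frame setup builtin pushed it too -> 24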
@@ -221,7 +197,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
Register instance,
int offset) {
DCHECK_LE(0, offset);
- LoadTaggedPointerField(dst, MemOperand{instance, offset});
+ LoadTaggedField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -231,21 +207,21 @@ void LiftoffAssembler::SpillInstance(Register instance) {
void LiftoffAssembler::ResetOSRTarget() {}
void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_s(dst, src);
+ MacroAssembler::Neg_s(dst, src);
}
void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
- TurboAssembler::Neg_d(dst, src);
+ MacroAssembler::Neg_d(dst, src);
}
void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float32Min(dst, lhs, rhs);
+ MacroAssembler::Float32Min(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float32Max(dst, lhs, rhs);
+ MacroAssembler::Float32Max(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
@@ -255,12 +231,12 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float64Min(dst, lhs, rhs);
+ MacroAssembler::Float64Min(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
- TurboAssembler::Float64Max(dst, lhs, rhs);
+ MacroAssembler::Float64Max(dst, lhs, rhs);
}
void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
@@ -303,17 +279,17 @@ FP_UNOP(f64_sqrt, fsqrt_d)
#undef FP_UNOP
#undef FP_UNOP_RETURN_TRUE
-static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
+static FPUCondition ConditionToConditionCmpFPU(Condition condition) {
switch (condition) {
case kEqual:
return EQ;
- case kUnequal:
+ case kNotEqual:
return NE;
case kUnsignedLessThan:
return LT;
- case kUnsignedGreaterEqual:
+ case kUnsignedGreaterThanEqual:
return GE;
- case kUnsignedLessEqual:
+ case kUnsignedLessThanEqual:
return LE;
case kUnsignedGreaterThan:
return GT;
@@ -323,18 +299,18 @@ static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
UNREACHABLE();
}
-void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
- TurboAssembler::CompareF32(dst, fcond, lhs, rhs);
+ FPUCondition fcond = ConditionToConditionCmpFPU(cond);
+ MacroAssembler::CompareF32(dst, fcond, lhs, rhs);
}
-void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
- Register dst, DoubleRegister lhs,
+void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
+ DoubleRegister lhs,
DoubleRegister rhs) {
- FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
- TurboAssembler::CompareF64(dst, fcond, lhs, rhs);
+ FPUCondition fcond = ConditionToConditionCmpFPU(cond);
+ MacroAssembler::CompareF64(dst, fcond, lhs, rhs);
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -354,24 +330,41 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
Branch(target, condition, scratch, Operand(zero_reg));
}
+// Implement vector popcnt by analogy with scalar dense_popcnt:
+// int dense_popcnt(uint32_t n)
+// {
+// int count = 32; // sizeof(uint32_t) * CHAR_BIT;
+// n ^= 0xFF'FF'FF'FF;
+// while(n)
+// {
+// --count;
+// n &= n - 1;
+// }
+// return count;
+// }
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
VRegister src_v = src.fp().toV();
VRegister dst_v = dst.fp().toV();
- Label t;
-
+ Label t, done;
VU.set(kScratchReg, E8, m1);
vmv_vv(kSimd128ScratchReg, src_v);
- vmv_vv(dst_v, kSimd128RegZero);
-
+ li(kScratchReg, 0xFF);
+ vxor_vx(kSimd128ScratchReg, kSimd128ScratchReg, kScratchReg);
+ vmv_vi(dst_v, 8);
+ vmv_vi(kSimd128RegZero, 0);
bind(&t);
- vmsne_vv(v0, kSimd128ScratchReg, kSimd128RegZero);
- vadd_vi(dst_v, dst_v, 1, Mask);
- vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, Mask);
- vand_vv(kSimd128ScratchReg, kSimd128ScratchReg, kSimd128ScratchReg2);
- // kScratchReg = -1 if kSimd128ScratchReg == 0 i.e. no active element
- vfirst_m(kScratchReg, kSimd128ScratchReg);
- bgez(kScratchReg, &t);
+ vmsne_vi(v0, kSimd128ScratchReg, 0);
+ VU.set(kScratchReg, E16, m1);
+ vmv_xs(kScratchReg, v0);
+ beqz(kScratchReg, &done);
+ VU.set(kScratchReg, E8, m1);
+ vadd_vi(dst_v, dst_v, -1, MaskType::Mask);
+ vadd_vi(kSimd128ScratchReg2, kSimd128ScratchReg, -1, MaskType::Mask);
+ vand_vv(kSimd128ScratchReg, kSimd128ScratchReg2, kSimd128ScratchReg,
+ MaskType::Mask);
+ Branch(&t);
+ bind(&done);
}
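A quick cross-check of the scalar reference from the comment above (a hedged sketch: dense_popcnt is copied from the comment, the GCC/Clang __builtin_popcount cross-check is ours; the vector code runs the same loop per 8-bit lane, starting the count at 8 and complementing with 0xFF):

    #include <cassert>
    #include <cstdint>

    int dense_popcnt(uint32_t n) {
      int count = 32;       // sizeof(uint32_t) * CHAR_BIT
      n ^= 0xFF'FF'FF'FFu;  // count zero bits by clearing set bits of ~n
      while (n) {
        --count;
        n &= n - 1;         // clear the lowest set bit
      }
      return count;
    }

    int main() {
      for (uint32_t v : {0u, 1u, 0xFFu, 0x80000001u, 0xFFFFFFFFu}) {
        assert(dense_popcnt(v) == __builtin_popcount(v));
      }
    }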
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -616,23 +609,11 @@ void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst,
vsmul_vv(dst.fp().toV(), src1.fp().toV(), src2.fp().toV());
}
-void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_dot_i8x16_i7x16_s");
-}
-
-void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs,
- LiftoffRegister acc) {
- bailout(kSimd, "emit_i32x4_dot_i8x16_i7x16_add_s");
-}
-
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
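The added vmv_vx zeroing here (and in the i8x16/i16x8/i32x4 variants below) matters because vmslt_vv writes only one mask bit per source lane and can leave the remaining bits of kSimd128ScratchReg stale, which vmv_xs would then read back. A scalar model of the bitmask semantics (our sketch, not V8 code):

    #include <cstdint>

    // Bit i of the result is set iff lane i of src is negative.
    uint32_t i64x2_bitmask(const int64_t src[2]) {
      uint32_t mask = 0;  // start from zero, mirroring the vmv_vx fix
      for (int i = 0; i < 2; ++i) {
        if (src[i] < 0) mask |= 1u << i;
      }
      return mask;
    }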
@@ -988,20 +969,20 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E8, m1);
- Label alltrue;
- li(kScratchReg, -1);
- vmv_sx(kSimd128ScratchReg, kScratchReg);
+ Label notalltrue;
+ vmv_vi(kSimd128ScratchReg, -1);
vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
vmv_xs(dst.gp(), kSimd128ScratchReg);
- beqz(dst.gp(), &alltrue);
+ beqz(dst.gp(), &notalltrue);
li(dst.gp(), 1);
- bind(&alltrue);
+ bind(&notalltrue);
}
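The alltrue idiom: an unsigned-min reduction across the lanes is nonzero exactly when every lane is nonzero, so dst is left 0 when some lane is zero and set to 1 otherwise. Scalar sketch of the same check:

    #include <algorithm>
    #include <cstdint>

    bool i8x16_alltrue(const uint8_t lanes[16]) {
      uint8_t min = 0xFF;  // seed, like vmv_vi(kSimd128ScratchReg, -1)
      for (int i = 0; i < 16; ++i) min = std::min(min, lanes[i]);
      return min != 0;     // beqz skips li(dst, 1) when a lane was zero
    }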
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E8, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
@@ -1140,6 +1121,7 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E16, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
VU.set(kScratchReg, E32, m1);
vmv_xs(dst.gp(), kSimd128ScratchReg);
@@ -1284,6 +1266,7 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E32, m1);
vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vx(kSimd128ScratchReg, zero_reg);
vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
vmv_xs(dst.gp(), kSimd128ScratchReg);
}
@@ -1406,6 +1389,67 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
}
+void LiftoffAssembler::emit_i16x8_dot_i8x16_i7x16_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ VU.set(kScratchReg, E8, m1);
+ vwmul_vv(kSimd128ScratchReg3, lhs.fp().toV(), rhs.fp().toV());
+ VU.set(kScratchReg, E16, m2);
+
+ constexpr int32_t FIRST_INDEX = 0b0101010101010101;
+ constexpr int32_t SECOND_INDEX = 0b1010101010101010;
+ li(kScratchReg, FIRST_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, kSimd128ScratchReg3, v0);
+
+ li(kScratchReg, SECOND_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, kSimd128ScratchReg3, kSimd128ScratchReg2);
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0);
+}
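For reference, the semantics this lowers: 16 pairwise i8 x i7 products widened to i16, then adjacent products added. FIRST_INDEX selects the even product lanes and SECOND_INDEX the odd ones, so the two vcompress results line up for a single vadd. A scalar reference (our sketch):

    #include <cstdint>

    void i16x8_dot_i8x16_i7x16_s(int16_t dst[8], const int8_t lhs[16],
                                 const int8_t rhs[16]) {
      for (int i = 0; i < 8; ++i) {
        // rhs lanes are 7-bit (0..127) by the relaxed-SIMD contract, so the
        // pair sum fits in i16: |sum| <= 2 * 128 * 127 = 32512.
        dst[i] = static_cast<int16_t>(int32_t{lhs[2 * i]} * rhs[2 * i] +
                                      int32_t{lhs[2 * i + 1]} * rhs[2 * i + 1]);
      }
    }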
+
+void LiftoffAssembler::emit_i32x4_dot_i8x16_i7x16_add_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ LiftoffRegister acc) {
+ VU.set(kScratchReg, E8, m1);
+ VRegister intermediate = kSimd128ScratchReg3;
+ vwmul_vv(intermediate, lhs.fp().toV(), rhs.fp().toV()); // 16 x i16 products in v8/v9 (m2 group)
+
+ constexpr int32_t FIRST_INDEX = 0b0001000100010001;
+ constexpr int32_t SECOND_INDEX = 0b0010001000100010;
+ constexpr int32_t THIRD_INDEX = 0b0100010001000100;
+ constexpr int32_t FOURTH_INDEX = 0b1000100010001000;
+
+ VU.set(kScratchReg, E16, m2);
+ li(kScratchReg, FIRST_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
+ li(kScratchReg, SECOND_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
+
+ VU.set(kScratchReg, E16, m1);
+ vwadd_vv(dst.fp().toV(), kSimd128ScratchReg, v0); // i32*4 c
+
+ VU.set(kScratchReg, E16, m2);
+ li(kScratchReg, THIRD_INDEX);
+ vmv_sx(v0, kScratchReg);
+ vcompress_vv(kSimd128ScratchReg, intermediate, v0); // i16*4 a
+
+ li(kScratchReg, FOURTH_INDEX);
+ vmv_sx(kSimd128ScratchReg2, kScratchReg);
+ vcompress_vv(v0, intermediate, kSimd128ScratchReg2); // i16*4 b
+
+ VU.set(kScratchReg, E16, m1);
+ vwadd_vv(kSimd128ScratchReg3, kSimd128ScratchReg, v0); // i32*4 c
+
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), dst.fp().toV(), kSimd128ScratchReg3);
+ vadd_vv(dst.fp().toV(), dst.fp().toV(), acc.fp().toV());
+}
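The 4-wide variant sums groups of four products into i32 and adds the accumulator; the four index constants compress product lanes 0,4,8,12 / 1,5,9,13 / 2,6,10,14 / 3,7,11,15 so that two widening adds plus a final vadd produce each group sum. Scalar reference (our sketch):

    #include <cstdint>

    void i32x4_dot_i8x16_i7x16_add_s(int32_t dst[4], const int8_t lhs[16],
                                     const int8_t rhs[16], const int32_t acc[4]) {
      for (int i = 0; i < 4; ++i) {
        int32_t sum = 0;
        for (int j = 0; j < 4; ++j) {
          sum += int32_t{lhs[4 * i + j]} * rhs[4 * i + j];
        }
        dst[i] = sum + acc[i];
      }
    }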
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
VU.set(kScratchReg, E64, m1);
@@ -1723,8 +1767,9 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
VU.set(FPURoundingMode::RTZ);
vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
- vfcvt_x_f_v(dst.fp().toV(), src.fp().toV(), Mask);
+ vfcvt_x_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -1732,8 +1777,9 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
VU.set(FPURoundingMode::RTZ);
vmfeq_vv(v0, src.fp().toV(), src.fp().toV());
+ vmv_vv(kSimd128ScratchReg, src.fp().toV());
vmv_vx(dst.fp().toV(), zero_reg);
- vfcvt_xu_f_v(dst.fp().toV(), src.fp().toV(), Mask);
+ vfcvt_xu_f_v(dst.fp().toV(), kSimd128ScratchReg, Mask);
}
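Both conversions build a not-NaN mask with vmfeq_vv(v0, src, src), zero dst, and convert under the mask, so NaN lanes stay 0 while RTZ truncates the rest; the new copy of src into kSimd128ScratchReg preserves the input when dst aliases src, since dst is zeroed before the masked convert. Scalar model of the unsigned lane rule (a sketch; the saturation itself comes from the RVV conversion):

    #include <cmath>
    #include <cstdint>

    uint32_t trunc_sat_f32_u(float x) {
      if (std::isnan(x)) return 0;                // masked-out lane stays zero
      if (x <= 0.0f) return 0;                    // saturate low
      if (x >= 4294967296.0f) return UINT32_MAX;  // saturate high (2^32)
      return static_cast<uint32_t>(x);            // RTZ truncation
    }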
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
@@ -1754,48 +1800,48 @@ void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E16, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E8, m1);
VU.set(FPURoundingMode::RNE);
- vnclip_vi(dst.fp().toV(), v26, 0);
+ vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E16, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E16, m2);
- vmax_vx(v26, v26, zero_reg);
+ vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
VU.set(kScratchReg, E8, m1);
VU.set(FPURoundingMode::RNE);
- vnclipu_vi(dst.fp().toV(), v26, 0);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E16, m1);
VU.set(FPURoundingMode::RNE);
- vnclip_vi(dst.fp().toV(), v26, 0);
+ vnclip_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
VU.set(kScratchReg, E32, m1);
- vmv_vv(v26, lhs.fp().toV());
- vmv_vv(v27, lhs.fp().toV());
+ vmv_vv(kSimd128ScratchReg, lhs.fp().toV()); // kSimd128ScratchReg v24
+ vmv_vv(v25, rhs.fp().toV());
VU.set(kScratchReg, E32, m2);
- vmax_vx(v26, v26, zero_reg);
+ vmax_vx(kSimd128ScratchReg, kSimd128ScratchReg, zero_reg);
VU.set(kScratchReg, E16, m1);
VU.set(FPURoundingMode::RNE);
- vnclipu_vi(dst.fp().toV(), v26, 0);
+ vnclipu_vi(dst.fp().toV(), kSimd128ScratchReg, 0);
}
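These narrowing conversions lean on vnclip/vnclipu to saturate while halving the element width, with vmax_vx clamping negatives to zero first in the unsigned cases. Scalar sketch of the i16x8 -> i8x16 lane rules:

    #include <algorithm>
    #include <cstdint>

    int8_t sat_narrow_s(int16_t x) {   // vnclip_vi with shift amount 0
      return static_cast<int8_t>(std::clamp<int16_t>(x, -128, 127));
    }

    uint8_t sat_narrow_u(int16_t x) {  // vmax_vx(..., zero_reg) then vnclipu_vi
      return static_cast<uint8_t>(std::clamp<int16_t>(x, 0, 255));
    }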
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
@@ -1890,6 +1936,7 @@ void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
VU.set(kScratchReg, E8, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1899,6 +1946,7 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
VU.set(kScratchReg, E16, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1908,6 +1956,7 @@ void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
VU.set(kScratchReg, E64, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
@@ -1917,6 +1966,7 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
VU.set(kScratchReg, E32, m1);
vmv_vx(kSimd128RegZero, zero_reg);
vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmv_vv(v0, kSimd128RegZero);
vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
vneg_vv(dst.fp().toV(), src.fp().toV(), MaskType::Mask);
}
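All four abs variants share the pattern: compare against zero to build a lanes-negative mask, then negate only the masked lanes; the added vmv_vv(v0, kSimd128RegZero) clears any stale bits in the mask register first. Scalar model (wrapping negate, so INT32_MIN maps to itself as Wasm requires):

    #include <cstdint>

    void i32x4_abs(int32_t dst[4], const int32_t src[4]) {
      for (int i = 0; i < 4; ++i) {
        bool neg = src[i] < 0;  // vmslt_vv mask bit
        dst[i] = neg ? static_cast<int32_t>(0u - static_cast<uint32_t>(src[i]))
                     : src[i];  // vneg_vv under MaskType::Mask
      }
    }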
@@ -2062,33 +2112,41 @@ void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfma");
+ VU.set(kScratchReg, E32, m1);
+ vfmadd_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f32x4_qfms");
+ VU.set(kScratchReg, E32, m1);
+ vfnmsub_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfma");
+ VU.set(kScratchReg, E64, m1);
+ vfmadd_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
LiftoffRegister src3) {
- bailout(kRelaxedSimd, "emit_f64x2_qfms");
+ VU.set(kScratchReg, E64, m1);
+ vfnmsub_vv(src1.fp().toV(), src2.fp().toV(), src3.fp().toV());
+ vmv_vv(dst.fp().toV(), src1.fp().toV());
}
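The relaxed fused ops map directly onto RVV FMAs: vfmadd_vv computes vd = vd*vs1 + vs2, so src1 ends up holding src1*src2 + src3 before the move into dst, and vfnmsub_vv computes vd = -(vd*vs1) + vs2, i.e. src3 - src1*src2. Note that both sequences clobber src1. Scalar lane semantics (assuming fused, single-rounding behavior, modeled with std::fma):

    #include <cmath>

    float qfma(float a, float b, float c) { return std::fma(a, b, c); }   // a*b + c
    float qfms(float a, float b, float c) { return std::fma(-a, b, c); }  // c - a*b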
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
- TurboAssembler::LoadWord(limit_address, MemOperand(limit_address));
- TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+ MacroAssembler::LoadWord(limit_address, MemOperand(limit_address));
+ MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
@@ -2121,7 +2179,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
int32_t offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
+ MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
fp_regs.clear(reg);
offset += sizeof(double);
}
@@ -2134,7 +2192,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
int32_t fp_offset = 0;
while (!fp_regs.is_empty()) {
LiftoffRegister reg = fp_regs.GetFirstRegSet();
- TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
+ MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
fp_regs.clear(reg);
fp_offset += sizeof(double);
}
@@ -2168,7 +2226,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint(
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
- TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
+ MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
@@ -2207,7 +2265,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
AddWord(sp, sp, Operand(-size));
- TurboAssembler::Move(addr, sp);
+ MacroAssembler::Move(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {