author     Michaël Zasso <targos@protonmail.com>  2021-03-12 08:24:20 +0100
committer  Michaël Zasso <targos@protonmail.com>  2021-03-15 15:54:50 +0100
commit     732ad99e47bae5deffa3a22d2ebe5500284106f0 (patch)
tree       759a6b072accf188f03c74a84e8256fe92f1925c /deps/v8/src/wasm/baseline
parent     802b3e7cf9a5074a72bec75cf1c46758b81e04b1 (diff)
deps: update V8 to 9.0.257.11
PR-URL: https://github.com/nodejs/node/pull/37587
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Diffstat (limited to 'deps/v8/src/wasm/baseline')
-rw-r--r--  deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h          437
-rw-r--r--  deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h      414
-rw-r--r--  deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h        531
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler-defs.h              21
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.cc                 284
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-assembler.h                  320
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.cc                 2092
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-compiler.h                     6
-rw-r--r--  deps/v8/src/wasm/baseline/liftoff-register.h                    51
-rw-r--r--  deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h        344
-rw-r--r--  deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h    375
-rw-r--r--  deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h          184
-rw-r--r--  deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h 2516
-rw-r--r--  deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h       1113
-rw-r--r--  deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h          471
15 files changed, 7226 insertions, 1933 deletions
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index bee45ad9af..b8c4911722 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_ARM_LIFTOFF_ASSEMBLER_ARM_H_
#include "src/base/platform/wrappers.h"
+#include "src/codegen/arm/register-arm.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/baseline/liftoff-register.h"
@@ -292,34 +293,35 @@ inline void F64x2Compare(LiftoffAssembler* assm, LiftoffRegister dst,
}
inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
- ValueType type) {
+ ValueKind kind) {
#ifdef DEBUG
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
DCHECK(UseScratchRegisterScope{assm}.CanAcquire());
#endif
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->str(src.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
// Positive offsets should be lowered to kI32.
assm->str(src.low_gp(), MemOperand(dst.rn(), dst.offset()));
assm->str(
src.high_gp(),
MemOperand(dst.rn(), dst.offset() + liftoff::kHalfStackSlotSize));
break;
- case ValueType::kF32:
+ case kF32:
assm->vstr(liftoff::GetFloatRegister(src.fp()), dst);
break;
- case ValueType::kF64:
+ case kF64:
assm->vstr(src.fp(), dst);
break;
- case ValueType::kS128: {
+ case kS128: {
UseScratchRegisterScope temps(assm);
Register addr = liftoff::CalculateActualAddress(assm, &temps, dst.rn(),
no_reg, dst.offset());
@@ -332,27 +334,28 @@ inline void Store(LiftoffAssembler* assm, LiftoffRegister src, MemOperand dst,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->ldr(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->ldr(dst.low_gp(), MemOperand(src.rn(), src.offset()));
assm->ldr(
dst.high_gp(),
MemOperand(src.rn(), src.offset() + liftoff::kHalfStackSlotSize));
break;
- case ValueType::kF32:
+ case kF32:
assm->vldr(liftoff::GetFloatRegister(dst.fp()), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->vldr(dst.fp(), src);
break;
- case ValueType::kS128: {
+ case kS128: {
// Get memory address of slot to fill from.
UseScratchRegisterScope temps(assm);
Register addr = liftoff::CalculateActualAddress(assm, &temps, src.rn(),
@@ -531,26 +534,26 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -558,10 +561,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Operand(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
- case ValueType::kF64: {
+ case kF64: {
Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
@@ -571,15 +574,31 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
- DCHECK_EQ(4, size);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
- ldr(dst, MemOperand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ MemOperand src{instance, offset};
+ switch (size) {
+ case 1:
+ ldrb(dst, src);
+ break;
+ case 4:
+ ldr(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ ldr(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -701,15 +720,23 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
- {
- // Store the value.
- UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
- str(src.gp(), dst_op);
+ Register actual_offset_reg = offset_reg;
+ if (offset_reg != no_reg && offset_imm != 0) {
+ if (cache_state()->is_used(LiftoffRegister(offset_reg))) {
+ actual_offset_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+ add(actual_offset_reg, offset_reg, Operand(offset_imm));
}
+ MemOperand dst_op = actual_offset_reg == no_reg
+ ? MemOperand(dst_addr, offset_imm)
+ : MemOperand(dst_addr, actual_offset_reg);
+ str(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -720,8 +747,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
- CallRecordWriteStub(dst_addr, Operand(offset_imm), EMIT_REMEMBERED_SET,
- kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr,
+ actual_offset_reg == no_reg ? Operand(offset_imm)
+ : Operand(actual_offset_reg),
+ EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -1041,12 +1071,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
// simpler, even though other register pairs would also be possible.
constexpr Register dst_low = r8;
constexpr Register dst_high = r9;
- if (cache_state()->is_used(LiftoffRegister(dst_low))) {
- SpillRegister(LiftoffRegister(dst_low));
- }
- if (cache_state()->is_used(LiftoffRegister(dst_high))) {
- SpillRegister(LiftoffRegister(dst_high));
- }
+ SpillRegisters(dst_low, dst_high);
{
UseScratchRegisterScope temps(this);
Register actual_addr = liftoff::CalculateActualAddress(
@@ -1056,7 +1081,7 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
}
ParallelRegisterMove(
- {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}});
+ {{dst, LiftoffRegister::ForPair(dst_low, dst_high), kI64}});
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
@@ -1178,11 +1203,10 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(new_value_low, new_value_high), new_value,
- kWasmI64},
- {LiftoffRegister::ForPair(expected_low, expected_high), expected,
- kWasmI64},
- {dst_addr, dst_addr_reg, kWasmI32},
- {offset, offset_reg != no_reg ? offset_reg : offset, kWasmI32}});
+ kI64},
+ {LiftoffRegister::ForPair(expected_low, expected_high), expected, kI64},
+ {dst_addr, dst_addr_reg, kI32},
+ {offset, offset_reg != no_reg ? offset_reg : offset, kI32}});
{
UseScratchRegisterScope temps(lasm);
@@ -1210,7 +1234,7 @@ inline void AtomicI64CompareExchange(LiftoffAssembler* lasm,
__ bind(&done);
__ ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(result_low, result_high), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(result_low, result_high), kI64}});
}
#undef __
} // namespace liftoff
@@ -1321,52 +1345,52 @@ void LiftoffAssembler::AtomicFence() { dmb(ISH); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(fp, (caller_slot_idx + 1) * kSystemPointerSize);
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand dst(fp, (caller_slot_idx + 1) * kSystemPointerSize);
- liftoff::Store(this, src, dst, type);
+ liftoff::Store(this, src, dst, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(sp, offset);
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(type == kWasmI32 || type.is_reference_type());
+ DCHECK(kind == kI32 || is_reference_type(kind));
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
vmov(liftoff::GetFloatRegister(dst), liftoff::GetFloatRegister(src));
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
vmov(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
vmov(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
// The {str} instruction needs a temp register when the immediate in the
// provided MemOperand does not fit into 12 bits. This happens for large stack
// frames. This DCHECK checks that the temp register is available when needed.
@@ -1374,7 +1398,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
DCHECK_LT(0, offset);
RecordUsedSpillOffset(offset);
MemOperand dst(fp, -offset);
- liftoff::Store(this, reg, dst, type);
+ liftoff::Store(this, reg, dst, kind);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -1390,11 +1414,11 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
src = temps.Acquire();
}
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
mov(src, Operand(value.to_i32()));
str(src, dst);
break;
- case ValueType::kI64: {
+ case kI64: {
int32_t low_word = value.to_i64();
mov(src, Operand(low_word));
str(src, liftoff::GetHalfStackSlot(offset, kLowWord));
@@ -1409,8 +1433,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -2161,16 +2185,16 @@ void LiftoffAssembler::emit_jump(Label* label) { b(label); }
void LiftoffAssembler::emit_jump(Register target) { bx(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
cmp(lhs, Operand(0));
} else {
- DCHECK(type == kWasmI32 ||
- (type.is_reference_type() &&
+ DCHECK(kind == kI32 ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
cmp(lhs, rhs);
}
@@ -2279,7 +2303,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -2362,7 +2386,33 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ UseScratchRegisterScope temps(this);
+ Register actual_src_addr = liftoff::CalculateActualAddress(
+ this, &temps, addr, offset_reg, offset_imm);
+ TurboAssembler::Move(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+ *protected_load_pc = pc_offset();
+ LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
+ NeonListOperand dst_op =
+ NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp());
+ TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx,
+ NeonMemOperand(actual_src_addr));
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t laneidx,
+ uint32_t* protected_store_pc) {
+ UseScratchRegisterScope temps(this);
+ Register actual_dst_addr =
+ liftoff::CalculateActualAddress(this, &temps, dst, offset, offset_imm);
+ *protected_store_pc = pc_offset();
+
+ LoadStoreLaneParams store_params(type.mem_rep(), laneidx);
+ NeonListOperand src_op =
+ NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp());
+ TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx,
+ NeonMemOperand(actual_dst_addr));
}
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -2548,6 +2598,27 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
vmov(dest.high(), right.high(), gt);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_s32(dst.low_fp(), src_d.low());
+ vcvt_f64_s32(dst.high_fp(), src_d.high());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_u32(dst.low_fp(), src_d.low());
+ vcvt_f64_u32(dst.high_fp(), src_d.high());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister src_d = LowDwVfpRegister::from_code(src.low_fp().code());
+ vcvt_f64_f32(dst.low_fp(), src_d.low());
+ vcvt_f64_f32(dst.high_fp(), src_d.high());
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon32, liftoff::GetSimd128Register(dst), src.fp(), 0);
@@ -2767,6 +2838,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ V64x2AllTrue(dst.gp(), liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShift<liftoff::kLeft, NeonS64, Neon32>(this, dst, lhs, rhs);
@@ -2890,7 +2966,27 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ I64x2BitMask(dst.gp(), liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), src.high_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), src.low_fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), src.high_fp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2920,11 +3016,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
@@ -3058,6 +3149,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
vpadd(Neon32, dest.high(), scratch.low(), scratch.high());
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3097,11 +3198,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
UseScratchRegisterScope temps(this);
@@ -3271,6 +3367,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
imm_lane_idx);
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3297,6 +3403,14 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
src2.high_fp());
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ vqrdmulh(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src1),
+ liftoff::GetSimd128Register(src2));
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -3349,6 +3463,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcnt(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon8, liftoff::GetSimd128Register(dst), src.gp());
@@ -3381,8 +3500,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -3644,6 +3763,29 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
liftoff::GetSimd128Register(lhs), liftoff::GetSimd128Register(rhs));
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2Eq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2GtS(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ I64x2GeS(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
+ liftoff::GetSimd128Register(rhs));
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
vceq(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(lhs),
@@ -3754,6 +3896,14 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_f32_f64(dst_d.low(), src.low_fp());
+ vcvt_f32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3818,6 +3968,22 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
vmovl(NeonU16, liftoff::GetSimd128Register(dst), src.high_fp());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_s32_f64(dst_d.low(), src.low_fp());
+ vcvt_s32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ LowDwVfpRegister dst_d = LowDwVfpRegister::from_code(dst.low_fp().code());
+ vcvt_u32_f64(dst_d.low(), src.low_fp());
+ vcvt_u32_f64(dst_d.high(), src.high_fp());
+ vmov(dst.high_fp(), 0);
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3857,6 +4023,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
ldr(limit_address, MemOperand(limit_address));
cmp(sp, limit_address);
@@ -3942,10 +4113,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// Arguments are passed by pushing them all to the stack and then passing
// a pointer to them.
@@ -3954,22 +4125,22 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- switch (param_type.kind()) {
- case ValueType::kI32:
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
str(args->gp(), MemOperand(sp, arg_bytes));
break;
- case ValueType::kI64:
+ case kI64:
str(args->low_gp(), MemOperand(sp, arg_bytes));
str(args->high_gp(), MemOperand(sp, arg_bytes + kSystemPointerSize));
break;
- case ValueType::kF32:
+ case kF32:
vstr(liftoff::GetFloatRegister(args->fp()), MemOperand(sp, arg_bytes));
break;
- case ValueType::kF64:
+ case kF64:
vstr(args->fp(), MemOperand(sp, arg_bytes));
break;
- case ValueType::kS128:
+ case kS128:
vstr(args->low_fp(), MemOperand(sp, arg_bytes));
vstr(args->high_fp(),
MemOperand(sp, arg_bytes + 2 * kSystemPointerSize));
@@ -3978,7 +4149,7 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
UNREACHABLE();
}
args++;
- arg_bytes += param_type.element_size_bytes();
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4002,22 +4173,22 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- switch (out_argument_type.kind()) {
- case ValueType::kI32:
+ if (out_argument_kind != kStmt) {
+ switch (out_argument_kind) {
+ case kI32:
ldr(result_reg->gp(), MemOperand(sp));
break;
- case ValueType::kI64:
+ case kI64:
ldr(result_reg->low_gp(), MemOperand(sp));
ldr(result_reg->high_gp(), MemOperand(sp, kSystemPointerSize));
break;
- case ValueType::kF32:
+ case kF32:
vldr(liftoff::GetFloatRegister(result_reg->fp()), MemOperand(sp));
break;
- case ValueType::kF64:
+ case kF64:
vldr(result_reg->fp(), MemOperand(sp));
break;
- case ValueType::kS128:
+ case kS128:
vld1(Neon8, NeonListOperand(result_reg->low_fp(), 2),
NeonMemOperand(sp));
break;
@@ -4036,7 +4207,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
DCHECK(target != no_reg);
@@ -4068,25 +4239,27 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- switch (src.type().kind()) {
+ switch (src.kind()) {
// i32 and i64 can be treated as similar cases, i64 being previously
// split into two i32 registers
- case ValueType::kI32:
- case ValueType::kI64:
- case ValueType::kF32: {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kRef:
+ case kOptRef: {
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
asm_->ldr(scratch,
liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
asm_->Push(scratch);
} break;
- case ValueType::kF64: {
+ case kF64: {
UseScratchRegisterScope temps(asm_);
DwVfpRegister scratch = temps.AcquireD();
asm_->vldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->vpush(scratch);
} break;
- case ValueType::kS128: {
+ case kS128: {
MemOperand mem_op = liftoff::GetStackSlot(slot.src_offset_);
UseScratchRegisterScope temps(asm_);
Register addr = liftoff::CalculateActualAddress(
@@ -4102,24 +4275,24 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
- switch (src.type().kind()) {
- case ValueType::kI64: {
+ switch (src.kind()) {
+ case kI64: {
LiftoffRegister reg =
slot.half_ == kLowWord ? src.reg().low() : src.reg().high();
asm_->push(reg.gp());
} break;
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ case kI32:
+ case kRef:
+ case kOptRef:
asm_->push(src.reg().gp());
break;
- case ValueType::kF32:
+ case kF32:
asm_->vpush(liftoff::GetFloatRegister(src.reg().fp()));
break;
- case ValueType::kF64:
+ case kF64:
asm_->vpush(src.reg().fp());
break;
- case ValueType::kS128:
+ case kS128:
asm_->vpush(liftoff::GetSimd128Register(src.reg()));
break;
default:
@@ -4127,7 +4300,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kIntConst: {
- DCHECK(src.type() == kWasmI32 || src.type() == kWasmI64);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
UseScratchRegisterScope temps(asm_);
Register scratch = temps.Acquire();
// The high word is the sign extension of the low word.
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 815586ecd1..a2fe80891c 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -72,20 +72,21 @@ inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
-inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline CPURegister GetRegFromType(const LiftoffRegister& reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
return reg.gp().W();
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
return reg.gp().X();
- case ValueType::kF32:
+ case kF32:
return reg.fp().S();
- case ValueType::kF64:
+ case kF64:
return reg.fp().D();
- case ValueType::kS128:
+ case kS128:
return reg.fp().Q();
default:
UNREACHABLE();
@@ -103,15 +104,15 @@ inline CPURegList PadVRegList(RegList list) {
}
inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
return temps->AcquireW();
- case ValueType::kI64:
+ case kI64:
return temps->AcquireX();
- case ValueType::kF32:
+ case kF32:
return temps->AcquireS();
- case ValueType::kF64:
+ case kF64:
return temps->AcquireD();
default:
UNREACHABLE();
@@ -124,15 +125,37 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
Register offset, T offset_imm) {
if (offset.is_valid()) {
if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
- Register tmp = temps->AcquireW();
- // TODO(clemensb): Do a 64-bit addition if memory64 is used.
+ Register tmp = temps->AcquireX();
DCHECK_GE(kMaxUInt32, offset_imm);
- assm->Add(tmp, offset.W(), offset_imm);
- return MemOperand(addr.X(), tmp, UXTW);
+ assm->Add(tmp, offset.X(), offset_imm);
+ return MemOperand(addr.X(), tmp);
}
return MemOperand(addr.X(), offset_imm);
}
+// Certain load instructions do not support offset (register or immediate).
+// This creates a MemOperand that is suitable for such instructions by adding
+// |addr|, |offset| (if needed), and |offset_imm| into a temporary.
+inline MemOperand GetMemOpWithImmOffsetZero(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps,
+ Register addr, Register offset,
+ uintptr_t offset_imm) {
+ Register tmp = temps->AcquireX();
+ if (offset.is_valid()) {
+ // offset has passed BoundsCheckMem in liftoff-compiler, and been unsigned
+ // extended, so it is fine to use the full width of the register.
+ assm->Add(tmp, addr, offset);
+ if (offset_imm != 0) {
+ assm->Add(tmp, tmp, offset_imm);
+ }
+ } else {
+ if (offset_imm != 0) {
+ assm->Add(tmp, addr, offset_imm);
+ }
+ }
+ return MemOperand(tmp.X(), 0);
+}
+
enum class ShiftDirection : bool { kLeft, kRight };
enum class ShiftSign : bool { kSigned, kUnsigned };
@@ -334,34 +357,34 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
// TODO(zhin): Unaligned access typically take additional cycles, we should do
// some performance testing to see how big an effect it will take.
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
Mov(reg.gp().W(), Immediate(value.to_i32(), rmode));
break;
- case ValueType::kI64:
+ case kI64:
Mov(reg.gp().X(), Immediate(value.to_i64(), rmode));
break;
- case ValueType::kF32:
+ case kF32:
Fmov(reg.fp().S(), value.to_f32_boxed().get_scalar());
break;
- case ValueType::kF64:
+ case kF64:
Fmov(reg.fp().D(), value.to_f64_boxed().get_scalar());
break;
default:
@@ -369,21 +392,34 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- Ldr(dst.W(), MemOperand(dst, offset));
- } else {
- Ldr(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ MemOperand src{instance, offset};
+ switch (size) {
+ case 1:
+ Ldrb(dst.W(), src);
+ break;
+ case 4:
+ Ldr(dst.W(), src);
+ break;
+ case 8:
+ Ldr(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
DCHECK_LE(0, offset);
- Ldr(dst, liftoff::GetInstanceOperand());
- LoadTaggedPointerField(dst, MemOperand(dst, offset));
+ LoadTaggedPointerField(dst, MemOperand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -408,12 +444,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
// Store the value.
UseScratchRegisterScope temps(this);
MemOperand dst_op =
liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -427,8 +467,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
&exit);
- CallRecordWriteStub(dst_addr, Operand(offset_imm), EMIT_REMEMBERED_SET,
- kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(
+ dst_addr,
+ dst_op.IsRegisterOffset() ? Operand(dst_op.regoffset().X())
+ : Operand(dst_op.offset()),
+ EMIT_REMEMBERED_SET, kSaveFPRegs, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -797,56 +840,56 @@ void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
- Ldr(liftoff::GetRegFromType(dst, type), MemOperand(fp, offset));
+ Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = (caller_slot_idx + 1) * LiftoffAssembler::kStackSlotSize;
- Str(liftoff::GetRegFromType(src, type), MemOperand(fp, offset));
+ Str(liftoff::GetRegFromType(src, kind), MemOperand(fp, offset));
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- Ldr(liftoff::GetRegFromType(dst, type), MemOperand(sp, offset));
+ ValueKind kind) {
+ Ldr(liftoff::GetRegFromType(dst, kind), MemOperand(sp, offset));
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
UseScratchRegisterScope temps(this);
- CPURegister scratch = liftoff::AcquireByType(&temps, type);
+ CPURegister scratch = liftoff::AcquireByType(&temps, kind);
Ldr(scratch, liftoff::GetStackSlot(src_offset));
Str(scratch, liftoff::GetStackSlot(dst_offset));
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- if (type == kWasmI32) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ if (kind == kI32) {
Mov(dst.W(), src.W());
} else {
- DCHECK(kWasmI64 == type || type.is_reference_type());
+ DCHECK(kI64 == kind || is_reference_type(kind));
Mov(dst.X(), src.X());
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
- if (type == kWasmF32) {
+ ValueKind kind) {
+ if (kind == kF32) {
Fmov(dst.S(), src.S());
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
Fmov(dst.D(), src.D());
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Mov(dst.Q(), src.Q());
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- Str(liftoff::GetRegFromType(reg, type), dst);
+ Str(liftoff::GetRegFromType(reg, kind), dst);
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
@@ -855,7 +898,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
UseScratchRegisterScope temps(this);
CPURegister src = CPURegister::no_reg();
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
if (value.to_i32() == 0) {
src = wzr;
} else {
@@ -863,7 +906,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Mov(src.W(), value.to_i32());
}
break;
- case ValueType::kI64:
+ case kI64:
if (value.to_i64() == 0) {
src = xzr;
} else {
@@ -878,9 +921,9 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
Str(src, dst);
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- Ldr(liftoff::GetRegFromType(reg, type), src);
+ Ldr(liftoff::GetRegFromType(reg, kind), src);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -1463,24 +1506,25 @@ void LiftoffAssembler::emit_jump(Label* label) { B(label); }
void LiftoffAssembler::emit_jump(Register target) { Br(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
if (rhs.is_valid()) {
Cmp(lhs.W(), rhs.W());
} else {
Cmp(lhs.W(), wzr);
}
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(rhs.is_valid());
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI64:
+ case kI64:
if (rhs.is_valid()) {
Cmp(lhs.X(), rhs.X());
} else {
@@ -1554,7 +1598,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1572,7 +1616,10 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
uint32_t* protected_load_pc) {
UseScratchRegisterScope temps(this);
MemOperand src_op =
- liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+ transform == LoadTransformationKind::kSplat
+ ? liftoff::GetMemOpWithImmOffsetZero(this, &temps, src_addr,
+ offset_reg, offset_imm)
+ : liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -1604,20 +1651,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
Ldr(dst.fp().D(), src_op);
}
} else {
- // ld1r only allows no offset or post-index, so emit an add.
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
- if (src_op.IsRegisterOffset()) {
- // We have 2 tmp gps, so it's okay to acquire 1 more here, and actually
- // doesn't matter if we acquire the same one.
- Register tmp = temps.AcquireX();
- Add(tmp, src_op.base(), src_op.regoffset().X());
- src_op = MemOperand(tmp.X(), 0);
- } else if (src_op.IsImmediateOffset() && src_op.offset() != 0) {
- Register tmp = temps.AcquireX();
- Add(tmp, src_op.base(), src_op.offset());
- src_op = MemOperand(tmp.X(), 0);
- }
-
if (memtype == MachineType::Int8()) {
ld1r(dst.fp().V16B(), src_op);
} else if (memtype == MachineType::Int16()) {
@@ -1634,7 +1668,49 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op = liftoff::GetMemOpWithImmOffsetZero(
+ this, &temps, addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+
+ MachineType mem_type = type.mem_type();
+ if (dst != src) {
+ Mov(dst.fp().Q(), src.fp().Q());
+ }
+
+ if (mem_type == MachineType::Int8()) {
+ ld1(dst.fp().B(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int16()) {
+ ld1(dst.fp().H(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int32()) {
+ ld1(dst.fp().S(), laneidx, src_op);
+ } else if (mem_type == MachineType::Int64()) {
+ ld1(dst.fp().D(), laneidx, src_op);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ UseScratchRegisterScope temps(this);
+ MemOperand dst_op =
+ liftoff::GetMemOpWithImmOffsetZero(this, &temps, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ st1(src.fp().B(), lane, dst_op);
+ } else if (rep == MachineRepresentation::kWord16) {
+ st1(src.fp().H(), lane, dst_op);
+ } else if (rep == MachineRepresentation::kWord32) {
+ st1(src.fp().S(), lane, dst_op);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ st1(src.fp().D(), lane, dst_op);
+ }
}
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
@@ -1767,6 +1843,23 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
}
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp(), src.fp().V2S());
+ Scvtf(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp(), src.fp().V2S());
+ Ucvtf(dst.fp(), dst.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtl(dst.fp().V2D(), src.fp().V2S());
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V4S(), src.fp().S(), 0);
@@ -1917,6 +2010,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
Neg(dst.fp().V2D(), src.fp().V2D());
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ V64x2AllTrue(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
@@ -2014,7 +2112,27 @@ void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ I64x2BitMask(dst.gp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl(dst.fp().V2D(), src.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Sxtl2(dst.fp().V2D(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl(dst.fp().V2D(), src.fp().V2S());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Uxtl2(dst.fp().V2D(), src.fp().V4S());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2043,11 +2161,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
Neg(dst.fp().V4S(), src.fp().V4S());
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat4S);
@@ -2158,6 +2271,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
Addp(dst.fp().V4S(), tmp1, tmp2);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2214,11 +2337,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
Neg(dst.fp().V8H(), src.fp().V8H());
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, kFormat8H);
@@ -2383,6 +2501,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cnt(dst.fp().V16B(), src.fp().V16B());
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V16B(), src.gp().W());
@@ -2415,8 +2538,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
Neg(dst.fp().V16B(), src.fp().V16B());
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2638,6 +2761,27 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Cmhs(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmeq(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+ Mvn(dst.fp().V2D(), dst.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmgt(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Cmge(dst.fp().V2D(), lhs.fp().V2D(), rhs.fp().V2D());
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
Fcmeq(dst.fp().V4S(), lhs.fp().V4S(), rhs.fp().V4S());
@@ -2736,6 +2880,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
Ucvtf(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtn(dst.fp().V2S(), src.fp().V2D());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2832,6 +2981,18 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
Uxtl2(dst.fp().V4S(), src.fp().V8H());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzs(dst.fp().V2D(), src.fp().V2D());
+ Sqxtn(dst.fp().V2S(), dst.fp().V2D());
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzs(dst.fp().V2D(), src.fp().V2D());
+ Uqxtn(dst.fp().V2S(), dst.fp().V2D());
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2860,6 +3021,16 @@ void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
Abs(dst.fp().V8H(), src.fp().V8H());
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2884,11 +3055,22 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
Umull2(dst.fp().V8H(), src1.fp().V16B(), src2.fp().V16B());
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ Sqrdmulh(dst.fp().V8H(), src1.fp().V8H(), src2.fp().V8H());
+}
+
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
Abs(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
Ldr(limit_address, MemOperand(limit_address));
Cmp(sp, limit_address);
@@ -2942,10 +3124,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
Ret();
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
// The stack pointer is required to be quadword aligned.
int total_size = RoundUp(stack_bytes, kQuadWordSizeInBytes);
@@ -2953,9 +3135,9 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
Claim(total_size, 1);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- Poke(liftoff::GetRegFromType(*args++, param_type), arg_bytes);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ Poke(liftoff::GetRegFromType(*args++, param_kind), arg_bytes);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2978,8 +3160,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_type), 0);
+ if (out_argument_kind != kStmt) {
+ Peek(liftoff::GetRegFromType(*next_result_reg, out_argument_kind), 0);
}
Drop(total_size, 1);
@@ -2993,7 +3175,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// For Arm64, we have more cache registers than wasm parameters. That means
@@ -3035,34 +3217,34 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
void LiftoffStackSlots::Construct() {
size_t num_slots = 0;
for (auto& slot : slots_) {
- num_slots += slot.src_.type() == kWasmS128 ? 2 : 1;
+ num_slots += slot.src_.kind() == kS128 ? 2 : 1;
}
// The stack pointer is required to be quadword aligned.
asm_->Claim(RoundUp(num_slots, 2));
size_t poke_offset = num_slots * kXRegSize;
for (auto& slot : slots_) {
- poke_offset -= slot.src_.type() == kWasmS128 ? kXRegSize * 2 : kXRegSize;
+ poke_offset -= slot.src_.kind() == kS128 ? kXRegSize * 2 : kXRegSize;
switch (slot.src_.loc()) {
case LiftoffAssembler::VarState::kStack: {
UseScratchRegisterScope temps(asm_);
- CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.type());
+ CPURegister scratch = liftoff::AcquireByType(&temps, slot.src_.kind());
asm_->Ldr(scratch, liftoff::GetStackSlot(slot.src_offset_));
asm_->Poke(scratch, poke_offset);
break;
}
case LiftoffAssembler::VarState::kRegister:
- asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.type()),
+ asm_->Poke(liftoff::GetRegFromType(slot.src_.reg(), slot.src_.kind()),
poke_offset);
break;
case LiftoffAssembler::VarState::kIntConst:
- DCHECK(slot.src_.type() == kWasmI32 || slot.src_.type() == kWasmI64);
+ DCHECK(slot.src_.kind() == kI32 || slot.src_.kind() == kI64);
if (slot.src_.i32_const() == 0) {
- Register zero_reg = slot.src_.type() == kWasmI32 ? wzr : xzr;
+ Register zero_reg = slot.src_.kind() == kI32 ? wzr : xzr;
asm_->Poke(zero_reg, poke_offset);
} else {
UseScratchRegisterScope temps(asm_);
- Register scratch = slot.src_.type() == kWasmI32 ? temps.AcquireW()
- : temps.AcquireX();
+ Register scratch =
+ slot.src_.kind() == kI32 ? temps.AcquireW() : temps.AcquireX();
asm_->Mov(scratch, int64_t{slot.src_.i32_const()});
asm_->Poke(scratch, poke_offset);
}
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 890337fe12..ec52468a1a 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -65,26 +65,27 @@ static constexpr LiftoffRegList kByteRegs =
LiftoffRegList::FromBits<Register::ListOf(eax, ecx, edx)>();
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
- int32_t offset, ValueType type) {
+ int32_t offset, ValueKind kind) {
Operand src(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->mov(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->mov(dst.low_gp(), src);
assm->mov(dst.high_gp(), Operand(base, offset + 4));
break;
- case ValueType::kF32:
+ case kF32:
assm->movss(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->movsd(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->movdqu(dst.fp(), src);
break;
default:
@@ -93,23 +94,23 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
Operand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
assm->mov(dst, src.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->mov(dst, src.low_gp());
assm->mov(Operand(base, offset + 4), src.high_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->movss(dst, src.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->movsd(dst, src.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->movdqu(dst, src.fp());
break;
default:
@@ -117,26 +118,26 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
assm->push(reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->push(reg.high_gp());
assm->push(reg.low_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->AllocateStackSpace(sizeof(float));
assm->movss(Operand(esp, 0), reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->AllocateStackSpace(sizeof(double));
assm->movsd(Operand(esp, 0), reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->AllocateStackSpace(sizeof(double) * 2);
assm->movdqu(Operand(esp, 0), reg.fp());
break;
@@ -145,13 +146,6 @@ inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
}
}
-template <typename... Regs>
-inline void SpillRegisters(LiftoffAssembler* assm, Regs... regs) {
- for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
- if (assm->cache_state()->is_used(r)) assm->SpillRegister(r);
- }
-}
-
inline void SignExtendI32ToI64(Assembler* assm, LiftoffRegister reg) {
assm->mov(reg.high_gp(), reg.low_gp());
assm->sar(reg.high_gp(), 31);
@@ -163,7 +157,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -267,22 +261,22 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.is_reference_type() ? kSystemPointerSize
- : type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ return is_reference_type(kind) ? kSystemPointerSize
+ : element_size_bytes(kind);
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -290,10 +284,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::Move(reg.high_gp(), Immediate(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -301,15 +295,31 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
- DCHECK_EQ(4, size);
- mov(dst, Operand(dst, offset));
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ Operand src{instance, offset};
+ switch (size) {
+ case 1:
+ movzx_b(dst, src);
+ break;
+ case 4:
+ mov(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ mov(dst, Operand{instance, offset});
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -334,16 +344,19 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
DCHECK_GE(offset_imm, 0);
DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
STATIC_ASSERT(kTaggedSize == kInt32Size);
- Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Operand dst_op = offset_reg == no_reg
? Operand(dst_addr, offset_imm)
: Operand(dst_addr, offset_reg, times_1, offset_imm);
mov(dst_op, src.gp());
+ if (skip_write_barrier) return;
+
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
@@ -468,7 +481,7 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
LiftoffRegList pinned_byte = pinned | LiftoffRegList::ForRegs(dst_addr);
if (offset_reg != no_reg) pinned_byte.set(offset_reg);
Register byte_src =
- GetUnusedRegister(liftoff::kByteRegs, pinned_byte).gp();
+ GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned_byte)).gp();
mov(byte_src, src.gp());
mov_b(dst_op, byte_src);
}
@@ -562,11 +575,12 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
// If there are no unused candidate registers, but {src} is a candidate,
// then spill other uses of {src}. Otherwise spill any candidate register
// and use that.
- if (!cache_state_.has_unused_register(src_candidates, pinned) &&
+ LiftoffRegList unpinned_candidates = src_candidates.MaskOut(pinned);
+ if (!cache_state_.has_unused_register(unpinned_candidates) &&
src_candidates.has(src)) {
SpillRegister(src);
} else {
- Register safe_src = GetUnusedRegister(src_candidates, pinned).gp();
+ Register safe_src = GetUnusedRegister(unpinned_candidates).gp();
mov(safe_src, src_gp);
src_gp = safe_src;
}
@@ -614,7 +628,7 @@ inline void AtomicAddOrSubOrExchange32(LiftoffAssembler* lasm, Binop binop,
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
- __ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ __ GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)).gp();
__ mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
}
@@ -811,10 +825,10 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
std::swap(dst_addr, offset_reg);
}
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(lasm, old_hi, old_lo, new_hi, base, offset);
+ __ SpillRegisters(old_hi, old_lo, new_hi, base, offset);
__ ParallelRegisterMove(
{{LiftoffRegister::ForPair(base, offset),
- LiftoffRegister::ForPair(dst_addr, offset_reg), kWasmI64}});
+ LiftoffRegister::ForPair(dst_addr, offset_reg), kI64}});
Operand dst_op_lo = Operand(base, offset, times_1, offset_imm);
Operand dst_op_hi = Operand(base, offset, times_1, offset_imm + 4);
@@ -863,7 +877,7 @@ inline void AtomicBinop64(LiftoffAssembler* lasm, Binop op, Register dst_addr,
// Move the result into the correct registers.
__ ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(old_lo, old_hi), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(old_lo, old_hi), kI64}});
}
#undef __
@@ -981,7 +995,8 @@ void LiftoffAssembler::AtomicCompareExchange(
// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
- pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+ pinned.set(GetUnusedRegister(liftoff::kByteRegs.MaskOut(pinned)))
+ .gp();
mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
pinned.clear(LiftoffRegister(value_reg));
@@ -1041,7 +1056,7 @@ void LiftoffAssembler::AtomicCompareExchange(
Register address = esi;
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);
+ SpillRegisters(expected_hi, expected_lo, new_hi, address);
// We have to set new_lo specially, because it's the root register. We do it
// before setting all other registers so that the original value does not get
@@ -1050,9 +1065,9 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move all other values into the right register.
ParallelRegisterMove(
- {{LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
- {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
- {LiftoffRegister(new_hi), new_value.high(), kWasmI32}});
+ {{LiftoffRegister(address), LiftoffRegister(dst_addr), kI32},
+ {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kI64},
+ {LiftoffRegister(new_hi), new_value.high(), kI32}});
Operand dst_op = Operand(address, offset_imm);
@@ -1064,33 +1079,33 @@ void LiftoffAssembler::AtomicCompareExchange(
// Move the result into the correct registers.
ParallelRegisterMove(
- {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}});
+ {{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kI64}});
}
void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
liftoff::Load(this, dst, ebp, kSystemPointerSize * (caller_slot_idx + 1),
- type);
+ kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
- ValueType type) {
- liftoff::Load(this, reg, esp, offset, type);
+ ValueKind kind) {
+ liftoff::Load(this, reg, esp, offset, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
liftoff::Store(this, ebp, kSystemPointerSize * (caller_slot_idx + 1), src,
- type);
+ kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
- if (needs_gp_reg_pair(type)) {
+ ValueKind kind) {
+ if (needs_gp_reg_pair(kind)) {
liftoff::MoveStackValue(this,
liftoff::GetHalfStackSlot(src_offset, kLowWord),
liftoff::GetHalfStackSlot(dst_offset, kLowWord));
@@ -1103,46 +1118,47 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
}
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- DCHECK(kWasmI32 == type || type.is_reference_type());
+ DCHECK(kI32 == kind || is_reference_type(kind));
mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
movss(dst, src);
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
movsd(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Movaps(dst, src);
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
mov(dst, reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
mov(liftoff::GetHalfStackSlot(offset, kLowWord), reg.low_gp());
mov(liftoff::GetHalfStackSlot(offset, kHighWord), reg.high_gp());
break;
- case ValueType::kF32:
+ case kF32:
movss(dst, reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
movsd(dst, reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
movdqu(dst, reg.fp());
break;
default:
@@ -1154,10 +1170,10 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
mov(dst, Immediate(value.to_i32()));
break;
- case ValueType::kI64: {
+ case kI64: {
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
mov(liftoff::GetHalfStackSlot(offset, kLowWord), Immediate(low_word));
@@ -1170,8 +1186,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, ebp, -offset, type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, ebp, -offset, kind);
}
void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) {
@@ -1290,7 +1306,7 @@ void EmitInt32DivOrRem(LiftoffAssembler* assm, Register dst, Register lhs,
// another temporary register.
// Do all this before any branch, such that the code is executed
// unconditionally, as the cache state will also be modified unconditionally.
- liftoff::SpillRegisters(assm, eax, edx);
+ assm->SpillRegisters(eax, edx);
if (rhs == eax || rhs == edx) {
LiftoffRegList unavailable = LiftoffRegList::ForRegs(eax, edx, lhs);
Register tmp = assm->GetUnusedRegister(kGpReg, unavailable).gp();
@@ -1501,7 +1517,7 @@ inline void OpWithCarry(LiftoffAssembler* assm, LiftoffRegister dst,
// If necessary, move result into the right registers.
LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high);
- if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64);
+ if (tmp_result != dst) assm->Move(dst, tmp_result, kI64);
}
template <void (Assembler::*op)(Register, const Immediate&),
@@ -1557,12 +1573,11 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
Register rhs_lo = esi;
// Spill all these registers if they are still holding other values.
- liftoff::SpillRegisters(this, dst_hi, dst_lo, lhs_hi, rhs_lo);
+ SpillRegisters(dst_hi, dst_lo, lhs_hi, rhs_lo);
// Move lhs and rhs into the respective registers.
- ParallelRegisterMove(
- {{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kWasmI64},
- {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kWasmI64}});
+ ParallelRegisterMove({{LiftoffRegister::ForPair(lhs_lo, lhs_hi), lhs, kI64},
+ {LiftoffRegister::ForPair(rhs_lo, rhs_hi), rhs, kI64}});
// First mul: lhs_hi' = lhs_hi * rhs_lo.
imul(lhs_hi, rhs_lo);
@@ -1577,7 +1592,7 @@ void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
// Finally, move back the temporary result to the actual dst register pair.
LiftoffRegister dst_tmp = LiftoffRegister::ForPair(dst_lo, dst_hi);
- if (dst != dst_tmp) Move(dst, dst_tmp, kWasmI64);
+ if (dst != dst_tmp) Move(dst, dst_tmp, kI64);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1644,11 +1659,11 @@ inline void Emit64BitShiftOperation(
(assm->cache_state()->is_used(LiftoffRegister(ecx)) ||
pinned.has(LiftoffRegister(ecx)))) {
ecx_replace = assm->GetUnusedRegister(kGpReg, pinned).gp();
- reg_moves.emplace_back(ecx_replace, ecx, kWasmI32);
+ reg_moves.emplace_back(ecx_replace, ecx, kI32);
}
- reg_moves.emplace_back(dst, src, kWasmI64);
- reg_moves.emplace_back(ecx, amount, kWasmI32);
+ reg_moves.emplace_back(dst, src, kI64);
+ reg_moves.emplace_back(ecx, amount, kI32);
assm->ParallelRegisterMove(VectorOf(reg_moves));
// Do the actual shift.
@@ -1673,7 +1688,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shl(dst.high_gp(), amount - 32);
xor_(dst.low_gp(), dst.low_gp());
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
ShlPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -1693,7 +1708,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) sar(dst.low_gp(), amount - 32);
sar(dst.high_gp(), 31);
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
SarPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -1711,7 +1726,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
if (amount != 32) shr(dst.low_gp(), amount - 32);
xor_(dst.high_gp(), dst.high_gp());
} else {
- if (dst != src) Move(dst, src, kWasmI64);
+ if (dst != src) Move(dst, src, kI64);
ShrPair(dst.high_gp(), dst.low_gp(), amount);
}
}
@@ -2386,24 +2401,25 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
- switch (type.kind()) {
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI32:
+ case kI32:
cmp(lhs, rhs);
break;
default:
UNREACHABLE();
}
} else {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
test(lhs, lhs);
}
@@ -2555,7 +2571,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -2617,7 +2633,7 @@ template <void (Assembler::*avx_op)(XMMRegister, XMMRegister, XMMRegister),
void (Assembler::*sse_op)(XMMRegister, XMMRegister), uint8_t width>
void EmitSimdShiftOp(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister operand, LiftoffRegister count) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
LiftoffRegister tmp =
assm->GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(count));
constexpr int mask = (1 << width) - 1;
@@ -2695,7 +2711,11 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister src) {
+ LiftoffRegister src,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
Register tmp =
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
XMMRegister tmp_simd = liftoff::kScratchDoubleReg;
@@ -2763,7 +2783,52 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand src_op{addr, offset_reg, times_1, static_cast<int32_t>(offset_imm)};
+ *protected_load_pc = pc_offset();
+
+ MachineType mem_type = type.mem_type();
+ if (mem_type == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ } else if (mem_type == MachineType::Int32()) {
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ } else {
+ DCHECK_EQ(MachineType::Int64(), mem_type);
+ if (laneidx == 0) {
+ Movlps(dst.fp(), src.fp(), src_op);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst.fp(), src.fp(), src_op);
+ }
+ }
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand dst_op = Operand(dst, offset, times_1, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ Pextrb(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord16) {
+ Pextrw(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord32) {
+ S128Store32Lane(dst_op, src.fp(), lane);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ if (lane == 0) {
+ Movlps(dst_op, src.fp());
+ } else {
+ DCHECK_EQ(1, lane);
+ Movhps(dst_op, src.fp());
+ }
+ }
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -2826,6 +2891,15 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
Pshufb(dst.fp(), lhs.fp(), mask);
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register scratch = GetUnusedRegister(RegClass::kGpReg, {}).gp();
+ XMMRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, src))
+ .fp();
+ I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp, scratch);
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -3048,6 +3122,75 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Pcmpeqd(dst.fp(), ref);
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst == lhs.
+ if (dst != lhs) {
+ movdqa(dst.fp(), lhs.fp());
+ }
+ I64x2GtS(dst.fp(), dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ LiftoffRegister tmp = GetUnusedRegister(
+ RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ I64x2GtS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movaps(dst.fp(), tmp.fp());
+ } else {
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst != lhs.
+ if (dst == lhs) {
+ LiftoffRegister tmp = GetUnusedRegister(RegClass::kFpReg, {rhs},
+ LiftoffRegList::ForRegs(lhs));
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movdqa(dst.fp(), tmp.fp());
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ LiftoffRegister tmp = GetUnusedRegister(
+ RegClass::kFpReg, LiftoffRegList::ForRegs(lhs, rhs));
+ I64x2GeS(tmp.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ movaps(dst.fp(), tmp.fp());
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
+ }
+ }
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
@@ -3171,8 +3314,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -3188,8 +3331,8 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
@@ -3216,7 +3359,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
+ static constexpr RegClass tmp_rc = reg_class_for(kI32);
LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
@@ -3316,7 +3459,7 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
if (CpuFeatures::IsSupported(AVX)) {
@@ -3415,11 +3558,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
@@ -3546,6 +3684,18 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), liftoff::kScratchDoubleReg,
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(),
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3574,6 +3724,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
/*low=*/false, /*is_signed=*/false);
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3585,11 +3741,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
@@ -3691,6 +3842,17 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(),
+ GetUnusedRegister(kGpReg, {}).gp());
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
namespace liftoff {
// Helper function to check for register aliasing, AVX support, and moves
// registers around before calling the actual macro-assembler function.
@@ -3760,6 +3922,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
@@ -3845,7 +4012,7 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
@@ -3903,6 +4070,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
Movmskpd(dst.gp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2SConvertI32x4High(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2UConvertI32x4High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -4215,6 +4402,22 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, rhs, lhs);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2pd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtps2pd(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
// NAN->0
@@ -4241,7 +4444,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
DoubleRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, src)).fp();
// NAN->0, negative->0.
@@ -4306,6 +4509,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
liftoff::kScratchDoubleReg); // Add hi and lo, may round.
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtpd2ps(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -4345,8 +4553,7 @@ void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxbw(dst.fp(), dst.fp());
+ I16x8SConvertI8x16High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
@@ -4356,8 +4563,7 @@ void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxbw(dst.fp(), dst.fp());
+ I16x8UConvertI8x16High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
@@ -4367,8 +4573,7 @@ void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovsxwd(dst.fp(), dst.fp());
+ I32x4SConvertI16x8High(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
@@ -4378,8 +4583,19 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
LiftoffRegister src) {
- Palignr(dst.fp(), src.fp(), static_cast<uint8_t>(8));
- Pmovzxwd(dst.fp(), dst.fp());
+ I32x4UConvertI16x8High(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4418,6 +4634,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
Pabsd(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2Abs(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -4658,17 +4879,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, esp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, esp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4697,8 +4918,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, esp, 0, out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, esp, 0, out_argument_kind);
}
add(esp, Immediate(stack_bytes));
@@ -4712,7 +4933,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
jmp(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
// Since we have more cache registers than parameter registers, the
@@ -4758,26 +4979,26 @@ void LiftoffStackSlots::Construct() {
case LiftoffAssembler::VarState::kStack:
// The combination of AllocateStackSpace and 2 movdqu is usually smaller
// in code size than doing 4 pushes.
- if (src.type() == kWasmS128) {
+ if (src.kind() == kS128) {
asm_->AllocateStackSpace(sizeof(double) * 2);
asm_->movdqu(liftoff::kScratchDoubleReg,
liftoff::GetStackSlot(slot.src_offset_));
asm_->movdqu(Operand(esp, 0), liftoff::kScratchDoubleReg);
break;
}
- if (src.type() == kWasmF64) {
+ if (src.kind() == kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
}
asm_->push(liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_));
break;
case LiftoffAssembler::VarState::kRegister:
- if (src.type() == kWasmI64) {
+ if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
- kWasmI32);
+ kI32);
} else {
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
}
break;
case LiftoffAssembler::VarState::kIntConst:
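The ia32 hunks above apply the same mechanical migration as the arm/arm64 ports: helpers that used to take a wasm::ValueType and switch on type.kind() now take the plain ValueKind enumeration, the reference-like kinds (kRef, kOptRef, kRtt, kRttWithDepth) share the pointer-sized path, and free helpers such as element_size_bytes() replace the former ValueType member functions. The following is a minimal, self-contained sketch of that dispatch shape only; the enum and the *_sketch names are local stand-ins for illustration, not the actual V8 declarations.

// Sketch only: mirrors the ValueKind-based dispatch used by the ported
// helpers (compare SlotSizeForType / NeedsAlignment in the ia32 diff above).
// Every name below is a local stand-in, not a V8 declaration.
#include <cstdint>

enum ValueKindSketch : uint8_t {
  kI32, kI64, kF32, kF64, kS128, kRef, kOptRef, kRtt, kRttWithDepth
};

constexpr bool is_reference_kind_sketch(ValueKindSketch kind) {
  return kind == kRef || kind == kOptRef || kind == kRtt ||
         kind == kRttWithDepth;
}

constexpr int element_size_bytes_sketch(ValueKindSketch kind) {
  switch (kind) {
    case kI32: case kF32: return 4;
    case kI64: case kF64: return 8;
    case kS128: return 16;
    default: return static_cast<int>(sizeof(void*));  // reference kinds
  }
}

// On ia32, references occupy one system pointer per stack slot; all other
// kinds use their element size, as SlotSizeForType does in the diff above.
constexpr int slot_size_sketch(ValueKindSketch kind) {
  return is_reference_kind_sketch(kind) ? static_cast<int>(sizeof(void*))
                                        : element_size_bytes_sketch(kind);
}

static_assert(slot_size_sketch(kS128) == 16, "SIMD values need a double-wide slot");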
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index a4d7fd1221..11b2e4993c 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -77,6 +77,26 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+#elif V8_TARGET_ARCH_PPC64
+
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(r3, r4, r5, r6, r7, r8, r9, r10, r11);
+
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12);
+
+#elif V8_TARGET_ARCH_RISCV64
+
+// Any change to kLiftoffAssemblerGpCacheRegs also requires updating
+// kPushedGpRegs in frame-constants-riscv64.h
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, s7);
+
+// Any change to kLiftoffAssemblerFpCacheRegs also requires updating
+// kPushedFpRegs in frame-constants-riscv64.h
+constexpr RegList kLiftoffAssemblerFpCacheRegs =
+ DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
+ fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
@@ -84,7 +104,6 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
constexpr RegList kLiftoffAssemblerFpCacheRegs = 0xff;
#endif
-
} // namespace wasm
} // namespace internal
} // namespace v8
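The PPC64 and RISC-V additions above only extend the per-architecture allocatable-register lists; kLiftoffAssemblerGpCacheRegs and kLiftoffAssemblerFpCacheRegs are RegList values, which in this version of V8 behave as bitmasks with one bit per register code. Below is a small self-contained sketch of that representation, assuming that bitmask model; ListOfSketch and the register codes are illustrative stand-ins, not V8's Register::ListOf.

// Sketch only: a bitmask register list in the spirit of RegList above.
// ListOfSketch and the codes passed to it are made up for illustration.
#include <cstdint>

using RegListSketch = uint64_t;

template <typename... Codes>
constexpr RegListSketch ListOfSketch(Codes... codes) {
  return ((RegListSketch{1} << codes) | ... | RegListSketch{0});
}

// Eight hypothetical allocatable GP registers, identified by their codes.
constexpr RegListSketch kGpCacheRegsSketch =
    ListOfSketch(10, 11, 12, 13, 14, 15, 16, 17);

constexpr int CountRegsSketch(RegListSketch list) {
  int n = 0;
  while (list != 0) {
    list &= list - 1;  // clear the lowest set bit
    ++n;
  }
  return n;
}

static_assert(CountRegsSketch(kGpCacheRegsSketch) == 8,
              "the sketch list names eight cache registers");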
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
index 587430a107..3a8d7ba01e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -23,17 +23,18 @@ namespace internal {
namespace wasm {
using VarState = LiftoffAssembler::VarState;
+using ValueKindSig = LiftoffAssembler::ValueKindSig;
-constexpr ValueType LiftoffAssembler::kWasmIntPtr;
+constexpr ValueKind LiftoffAssembler::kIntPtr;
namespace {
class StackTransferRecipe {
struct RegisterMove {
LiftoffRegister src;
- ValueType type;
- constexpr RegisterMove(LiftoffRegister src, ValueType type)
- : src(src), type(type) {}
+ ValueKind kind;
+ constexpr RegisterMove(LiftoffRegister src, ValueKind kind)
+ : src(src), kind(kind) {}
};
struct RegisterLoad {
@@ -45,34 +46,34 @@ class StackTransferRecipe {
kHighHalfStack // fill a register from the high half of a stack slot.
};
- LoadKind kind;
- ValueType type;
+ LoadKind load_kind;
+ ValueKind kind;
int32_t value; // i32 constant value or stack offset, depending on kind.
// Named constructors.
static RegisterLoad Const(WasmValue constant) {
- if (constant.type() == kWasmI32) {
- return {kConstant, kWasmI32, constant.to_i32()};
+ if (constant.type().kind() == kI32) {
+ return {kConstant, kI32, constant.to_i32()};
}
- DCHECK_EQ(kWasmI64, constant.type());
- DCHECK_EQ(constant.to_i32_unchecked(), constant.to_i64_unchecked());
- return {kConstant, kWasmI64, constant.to_i32_unchecked()};
+ DCHECK_EQ(kI64, constant.type().kind());
+ int32_t i32_const = static_cast<int32_t>(constant.to_i64());
+ DCHECK_EQ(constant.to_i64(), i32_const);
+ return {kConstant, kI64, i32_const};
}
- static RegisterLoad Stack(int32_t offset, ValueType type) {
- return {kStack, type, offset};
+ static RegisterLoad Stack(int32_t offset, ValueKind kind) {
+ return {kStack, kind, offset};
}
static RegisterLoad HalfStack(int32_t offset, RegPairHalf half) {
- return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32,
- offset};
+ return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kI32, offset};
}
static RegisterLoad Nop() {
- // ValueType does not matter.
- return {kNop, kWasmI32, 0};
+ // ValueKind does not matter.
+ return {kNop, kI32, 0};
}
private:
- RegisterLoad(LoadKind kind, ValueType type, int32_t value)
- : kind(kind), type(type), value(value) {}
+ RegisterLoad(LoadKind load_kind, ValueKind kind, int32_t value)
+ : load_kind(load_kind), kind(kind), value(value) {}
};
public:
@@ -90,8 +91,23 @@ class StackTransferRecipe {
DCHECK(load_dst_regs_.is_empty());
}
+#if DEBUG
+ bool CheckCompatibleStackSlotTypes(ValueKind dst, ValueKind src) {
+ if (is_object_reference_type(dst)) {
+ // Since Liftoff doesn't do accurate type tracking (e.g. on loop back
+ // edges), we only care that pointer types stay amongst pointer types.
+ // It's fine if ref/optref overwrite each other.
+ DCHECK(is_object_reference_type(src));
+ } else {
+ // All other types (primitive numbers, RTTs, bottom/stmt) must be equal.
+ DCHECK_EQ(dst, src);
+ }
+ return true; // Dummy so this can be called via DCHECK.
+ }
+#endif
+
V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
- DCHECK_EQ(dst.type(), src.type());
+ DCHECK(CheckCompatibleStackSlotTypes(dst.kind(), src.kind()));
if (dst.is_reg()) {
LoadIntoRegister(dst.reg(), src, src.offset());
return;
@@ -104,11 +120,11 @@ class StackTransferRecipe {
switch (src.loc()) {
case VarState::kStack:
if (src.offset() != dst.offset()) {
- asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
+ asm_->MoveStackValue(dst.offset(), src.offset(), src.kind());
}
break;
case VarState::kRegister:
- asm_->Spill(dst.offset(), src.reg(), src.type());
+ asm_->Spill(dst.offset(), src.reg(), src.kind());
break;
case VarState::kIntConst:
asm_->Spill(dst.offset(), src.constant());
@@ -121,11 +137,11 @@ class StackTransferRecipe {
uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
- LoadStackSlot(dst, src_offset, src.type());
+ LoadStackSlot(dst, src_offset, src.kind());
break;
case VarState::kRegister:
DCHECK_EQ(dst.reg_class(), src.reg_class());
- if (dst != src.reg()) MoveRegister(dst, src.reg(), src.type());
+ if (dst != src.reg()) MoveRegister(dst, src.reg(), src.kind());
break;
case VarState::kIntConst:
LoadConstant(dst, src.constant());
@@ -139,7 +155,7 @@ class StackTransferRecipe {
// Use CHECK such that the remaining code is statically dead if
// {kNeedI64RegPair} is false.
CHECK(kNeedI64RegPair);
- DCHECK_EQ(kWasmI64, src.type());
+ DCHECK_EQ(kI64, src.kind());
switch (src.loc()) {
case VarState::kStack:
LoadI64HalfStackSlot(dst, offset, half);
@@ -147,7 +163,7 @@ class StackTransferRecipe {
case VarState::kRegister: {
LiftoffRegister src_half =
half == kLowWord ? src.reg().low() : src.reg().high();
- if (dst != src_half) MoveRegister(dst, src_half, kWasmI32);
+ if (dst != src_half) MoveRegister(dst, src_half, kI32);
break;
}
case VarState::kIntConst:
@@ -159,45 +175,44 @@ class StackTransferRecipe {
}
}
- void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueType type) {
+ void MoveRegister(LiftoffRegister dst, LiftoffRegister src, ValueKind kind) {
DCHECK_NE(dst, src);
DCHECK_EQ(dst.reg_class(), src.reg_class());
- DCHECK_EQ(reg_class_for(type), src.reg_class());
+ DCHECK_EQ(reg_class_for(kind), src.reg_class());
if (src.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, type);
- if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kWasmI32);
- if (dst.high() != src.high())
- MoveRegister(dst.high(), src.high(), kWasmI32);
+ DCHECK_EQ(kI64, kind);
+ if (dst.low() != src.low()) MoveRegister(dst.low(), src.low(), kI32);
+ if (dst.high() != src.high()) MoveRegister(dst.high(), src.high(), kI32);
return;
}
if (src.is_fp_pair()) {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
if (dst.low() != src.low()) {
- MoveRegister(dst.low(), src.low(), kWasmF64);
- MoveRegister(dst.high(), src.high(), kWasmF64);
+ MoveRegister(dst.low(), src.low(), kF64);
+ MoveRegister(dst.high(), src.high(), kF64);
}
return;
}
if (move_dst_regs_.has(dst)) {
DCHECK_EQ(register_move(dst)->src, src);
// Non-fp registers can only occur with the exact same type.
- DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->type == type);
+ DCHECK_IMPLIES(!dst.is_fp(), register_move(dst)->kind == kind);
// It can happen that one fp register holds both the f32 zero and the f64
// zero, as the initial value for local variables. Move the value as f64
// in that case.
- if (type == kWasmF64) register_move(dst)->type = kWasmF64;
+ if (kind == kF64) register_move(dst)->kind = kF64;
return;
}
move_dst_regs_.set(dst);
++*src_reg_use_count(src);
- *register_move(dst) = {src, type};
+ *register_move(dst) = {src, kind};
}
void LoadConstant(LiftoffRegister dst, WasmValue value) {
DCHECK(!load_dst_regs_.has(dst));
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, value.type());
+ DCHECK_EQ(kI64, value.type().kind());
int64_t i64 = value.to_i64();
*register_load(dst.low()) =
RegisterLoad::Const(WasmValue(static_cast<int32_t>(i64)));
@@ -209,7 +224,7 @@ class StackTransferRecipe {
}
void LoadStackSlot(LiftoffRegister dst, uint32_t stack_offset,
- ValueType type) {
+ ValueKind kind) {
if (load_dst_regs_.has(dst)) {
// It can happen that we spilled the same register to different stack
// slots, and then we reload them later into the same dst register.
@@ -218,20 +233,20 @@ class StackTransferRecipe {
}
load_dst_regs_.set(dst);
if (dst.is_gp_pair()) {
- DCHECK_EQ(kWasmI64, type);
+ DCHECK_EQ(kI64, kind);
*register_load(dst.low()) =
RegisterLoad::HalfStack(stack_offset, kLowWord);
*register_load(dst.high()) =
RegisterLoad::HalfStack(stack_offset, kHighWord);
} else if (dst.is_fp_pair()) {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
// Only need register_load for low_gp since we load 128 bits at one go.
// Both low and high need to be set in load_dst_regs_ but when iterating
// over it, both low and high will be cleared, so we won't load twice.
- *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, kind);
*register_load(dst.high()) = RegisterLoad::Nop();
} else {
- *register_load(dst) = RegisterLoad::Stack(stack_offset, type);
+ *register_load(dst) = RegisterLoad::Stack(stack_offset, kind);
}
}
@@ -279,7 +294,7 @@ class StackTransferRecipe {
void ExecuteMove(LiftoffRegister dst) {
RegisterMove* move = register_move(dst);
DCHECK_EQ(0, *src_reg_use_count(dst));
- asm_->Move(dst, move->src, move->type);
+ asm_->Move(dst, move->src, move->kind);
ClearExecutedMove(dst);
}
@@ -313,11 +328,11 @@ class StackTransferRecipe {
// TODO(clemensb): Use an unused register if available.
LiftoffRegister dst = move_dst_regs_.GetFirstRegSet();
RegisterMove* move = register_move(dst);
- last_spill_offset += LiftoffAssembler::SlotSizeForType(move->type);
+ last_spill_offset += LiftoffAssembler::SlotSizeForType(move->kind);
LiftoffRegister spill_reg = move->src;
- asm_->Spill(last_spill_offset, spill_reg, move->type);
+ asm_->Spill(last_spill_offset, spill_reg, move->kind);
// Remember to reload into the destination register later.
- LoadStackSlot(dst, last_spill_offset, move->type);
+ LoadStackSlot(dst, last_spill_offset, move->kind);
ClearExecutedMove(dst);
}
}
@@ -325,20 +340,20 @@ class StackTransferRecipe {
void ExecuteLoads() {
for (LiftoffRegister dst : load_dst_regs_) {
RegisterLoad* load = register_load(dst);
- switch (load->kind) {
+ switch (load->load_kind) {
case RegisterLoad::kNop:
break;
case RegisterLoad::kConstant:
- asm_->LoadConstant(dst, load->type == kWasmI64
+ asm_->LoadConstant(dst, load->kind == kI64
? WasmValue(int64_t{load->value})
: WasmValue(int32_t{load->value}));
break;
case RegisterLoad::kStack:
- if (kNeedS128RegPair && load->type == kWasmS128) {
+ if (kNeedS128RegPair && load->kind == kS128) {
asm_->Fill(LiftoffRegister::ForFpPair(dst.fp()), load->value,
- load->type);
+ load->kind);
} else {
- asm_->Fill(dst, load->value, load->type);
+ asm_->Fill(dst, load->value, load->kind);
}
break;
case RegisterLoad::kLowHalfStack:
@@ -415,18 +430,18 @@ void InitMergeRegion(LiftoffAssembler::CacheState* state,
reg = register_reuse_map.Lookup(source->reg());
}
// Third try: Use any free register.
- RegClass rc = reg_class_for(source->type());
+ RegClass rc = reg_class_for(source->kind());
if (!reg && state->has_unused_register(rc, used_regs)) {
reg = state->unused_register(rc, used_regs);
}
if (!reg) {
// No free register; make this a stack slot.
- *target = VarState(source->type(), source->offset());
+ *target = VarState(source->kind(), source->offset());
continue;
}
if (reuse_registers) register_reuse_map.Add(source->reg(), *reg);
state->inc_used(*reg);
- *target = VarState(source->type(), *reg, source->offset());
+ *target = VarState(source->kind(), *reg, source->offset());
}
}
@@ -440,6 +455,10 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// |------locals------|---(in between)----|--(discarded)--|----merge----|
// <-- num_locals --> <-- stack_depth -->^stack_base <-- arity -->
+ if (source.cached_instance != no_reg) {
+ SetInstanceCacheRegister(source.cached_instance);
+ }
+
uint32_t stack_base = stack_depth + num_locals;
uint32_t target_height = stack_base + arity;
uint32_t discarded = source.stack_height() - target_height;
@@ -514,7 +533,7 @@ void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
ZoneVector<int>* slots, LiftoffRegList* spills,
SpillLocation spill_location) {
for (const auto& slot : stack_state) {
- if (!slot.type().is_reference_type()) continue;
+ if (!is_reference_type(slot.kind())) continue;
if (spill_location == SpillLocation::kTopOfStack && slot.is_reg()) {
// Registers get spilled just before the call to the runtime. In {spills}
@@ -533,7 +552,7 @@ void LiftoffAssembler::CacheState::DefineSafepoint(Safepoint& safepoint) {
for (const auto& slot : stack_state) {
DCHECK(!slot.is_reg());
- if (slot.type().is_reference_type()) {
+ if (is_reference_type(slot.kind())) {
safepoint.DefinePointerSlot(GetSafepointIndexForStackSlot(slot));
}
}
@@ -571,12 +590,12 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) {
if (slot.is_reg()) return slot.reg();
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned);
if (slot.is_const()) {
LoadConstant(reg, slot.constant());
} else {
DCHECK(slot.is_stack());
- Fill(reg, slot.offset(), slot.type());
+ Fill(reg, slot.offset(), slot.kind());
}
return reg;
}
@@ -627,7 +646,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
if (slot.is_stack()) continue;
- RegClass rc = reg_class_for(slot.type());
+ RegClass rc = reg_class_for(slot.kind());
if (slot.is_reg()) {
if (cache_state_.get_use_count(slot.reg()) > 1) {
// If the register is used more than once, we cannot use it for the
@@ -635,7 +654,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
LiftoffRegList pinned;
pinned.set(slot.reg());
LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
- Move(dst_reg, slot.reg(), slot.type());
+ Move(dst_reg, slot.reg(), slot.kind());
cache_state_.dec_used(slot.reg());
cache_state_.inc_used(dst_reg);
slot.MakeRegister(dst_reg);
@@ -657,7 +676,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
VectorOf(stack_base, num_locals())}) {
for (VarState& slot : slots) {
if (!slot.is_const()) continue;
- RegClass rc = reg_class_for(slot.type());
+ RegClass rc = reg_class_for(slot.kind());
if (cache_state_.has_unused_register(rc)) {
LiftoffRegister reg = cache_state_.unused_register(rc);
LoadConstant(reg, slot.constant());
@@ -671,7 +690,7 @@ void LiftoffAssembler::MaterializeMergedConstants(uint32_t arity) {
}
}
-void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
+void LiftoffAssembler::MergeFullStackWith(CacheState& target,
const CacheState& source) {
DCHECK_EQ(source.stack_height(), target.stack_height());
// TODO(clemensb): Reuse the same StackTransferRecipe object to save some
@@ -680,10 +699,16 @@ void LiftoffAssembler::MergeFullStackWith(const CacheState& target,
for (uint32_t i = 0, e = source.stack_height(); i < e; ++i) {
transfers.TransferStackSlot(target.stack_state[i], source.stack_state[i]);
}
+
+ if (source.cached_instance != target.cached_instance) {
+ // Backward jumps (to loop headers) do not have a cached instance anyway, so
+ // ignore this. On forward jumps, just reset the cached instance in the
+ // target state.
+ target.ClearCachedInstanceRegister();
+ }
}
-void LiftoffAssembler::MergeStackWith(const CacheState& target,
- uint32_t arity) {
+void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity) {
// Before: ----------------|----- (discarded) ----|--- arity ---|
// ^target_stack_height ^stack_base ^stack_height
// After: ----|-- arity --|
@@ -704,6 +729,13 @@ void LiftoffAssembler::MergeStackWith(const CacheState& target,
transfers.TransferStackSlot(target.stack_state[target_stack_base + i],
cache_state_.stack_state[stack_base + i]);
}
+
+ if (cache_state_.cached_instance != target.cached_instance) {
+ // Backward jumps (to loop headers) do not have a cached instance anyway, so
+ // ignore this. On forward jumps, just reset the cached instance in the
+ // target state.
+ target.ClearCachedInstanceRegister();
+ }
}
void LiftoffAssembler::Spill(VarState* slot) {
@@ -711,7 +743,7 @@ void LiftoffAssembler::Spill(VarState* slot) {
case VarState::kStack:
return;
case VarState::kRegister:
- Spill(slot->offset(), slot->reg(), slot->type());
+ Spill(slot->offset(), slot->reg(), slot->kind());
cache_state_.dec_used(slot->reg());
break;
case VarState::kIntConst:
@@ -731,15 +763,20 @@ void LiftoffAssembler::SpillAllRegisters() {
for (uint32_t i = 0, e = cache_state_.stack_height(); i < e; ++i) {
auto& slot = cache_state_.stack_state[i];
if (!slot.is_reg()) continue;
- Spill(slot.offset(), slot.reg(), slot.type());
+ Spill(slot.offset(), slot.reg(), slot.kind());
slot.MakeStack();
}
+ cache_state_.ClearCachedInstanceRegister();
cache_state_.reset_used_registers();
}
void LiftoffAssembler::ClearRegister(
Register reg, std::initializer_list<Register*> possible_uses,
LiftoffRegList pinned) {
+ if (reg == cache_state()->cached_instance) {
+ cache_state()->ClearCachedInstanceRegister();
+ return;
+ }
if (cache_state()->is_used(LiftoffRegister(reg))) {
SpillRegister(LiftoffRegister(reg));
}
@@ -748,7 +785,7 @@ void LiftoffAssembler::ClearRegister(
if (reg != *use) continue;
if (replacement == no_reg) {
replacement = GetUnusedRegister(kGpReg, pinned).gp();
- Move(replacement, reg, LiftoffAssembler::kWasmIntPtr);
+ Move(replacement, reg, LiftoffAssembler::kIntPtr);
}
// We cannot leave this loop early. There may be multiple uses of {reg}.
*use = replacement;
@@ -756,7 +793,7 @@ void LiftoffAssembler::ClearRegister(
}
namespace {
-void PrepareStackTransfers(const FunctionSig* sig,
+void PrepareStackTransfers(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
const VarState* slots,
LiftoffStackSlots* stack_slots,
@@ -769,8 +806,8 @@ void PrepareStackTransfers(const FunctionSig* sig,
uint32_t num_params = static_cast<uint32_t>(sig->parameter_count());
for (uint32_t i = num_params; i > 0; --i) {
const uint32_t param = i - 1;
- ValueType type = sig->GetParam(param);
- const bool is_gp_pair = kNeedI64RegPair && type == kWasmI64;
+ ValueKind kind = sig->GetParam(param);
+ const bool is_gp_pair = kNeedI64RegPair && kind == kI64;
const int num_lowered_params = is_gp_pair ? 2 : 1;
const VarState& slot = slots[param];
const uint32_t stack_offset = slot.offset();
@@ -784,10 +821,10 @@ void PrepareStackTransfers(const FunctionSig* sig,
call_descriptor->GetInputLocation(call_desc_input_idx);
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
- RegClass rc = is_gp_pair ? kGpReg : reg_class_for(type);
+ RegClass rc = is_gp_pair ? kGpReg : reg_class_for(kind);
int reg_code = loc.AsRegister();
LiftoffRegister reg =
- LiftoffRegister::from_external_code(rc, type, reg_code);
+ LiftoffRegister::from_external_code(rc, kind, reg_code);
param_regs->set(reg);
if (is_gp_pair) {
stack_transfers->LoadI64HalfIntoRegister(reg, slot, stack_offset,
@@ -806,7 +843,7 @@ void PrepareStackTransfers(const FunctionSig* sig,
} // namespace
void LiftoffAssembler::PrepareBuiltinCall(
- const FunctionSig* sig, compiler::CallDescriptor* call_descriptor,
+ const ValueKindSig* sig, compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params) {
LiftoffStackSlots stack_slots(this);
StackTransferRecipe stack_transfers(this);
@@ -825,7 +862,7 @@ void LiftoffAssembler::PrepareBuiltinCall(
cache_state_.reset_used_registers();
}
-void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
+void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register* target,
Register* target_instance) {
@@ -834,12 +871,13 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
constexpr size_t kInputShift = 1;
// Spill all cache slots which are not being used as parameters.
+ cache_state_.ClearCachedInstanceRegister();
for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
it >= cache_state_.stack_state.begin() &&
!cache_state_.used_registers.is_empty();
--it) {
if (!it->is_reg()) continue;
- Spill(it->offset(), it->reg(), it->type());
+ Spill(it->offset(), it->reg(), it->kind());
cache_state_.dec_used(it->reg());
it->MakeStack();
}
@@ -856,8 +894,7 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
param_regs.set(instance_reg);
if (target_instance && *target_instance != instance_reg) {
stack_transfers.MoveRegister(LiftoffRegister(instance_reg),
- LiftoffRegister(*target_instance),
- kWasmIntPtr);
+ LiftoffRegister(*target_instance), kIntPtr);
}
if (num_params) {
@@ -875,10 +912,10 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
if (!free_regs.is_empty()) {
LiftoffRegister new_target = free_regs.GetFirstRegSet();
stack_transfers.MoveRegister(new_target, LiftoffRegister(*target),
- kWasmIntPtr);
+ kIntPtr);
*target = new_target.gp();
} else {
- stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kWasmIntPtr,
+ stack_slots.Add(LiftoffAssembler::VarState(LiftoffAssembler::kIntPtr,
LiftoffRegister(*target), 0));
*target = no_reg;
}
@@ -900,15 +937,15 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
}
}
-void LiftoffAssembler::FinishCall(const FunctionSig* sig,
+void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor) {
int call_desc_return_idx = 0;
- for (ValueType return_type : sig->returns()) {
+ for (ValueKind return_kind : sig->returns()) {
DCHECK_LT(call_desc_return_idx, call_descriptor->ReturnCount());
- const bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ const bool needs_gp_pair = needs_gp_reg_pair(return_kind);
const int num_lowered_params = 1 + needs_gp_pair;
- const ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- const RegClass rc = reg_class_for(lowered_type);
+ const ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ const RegClass rc = reg_class_for(lowered_kind);
// Initialize to anything, will be set in the loop and used afterwards.
LiftoffRegister reg_pair[2] = {kGpCacheRegList.GetFirstRegSet(),
kGpCacheRegList.GetFirstRegSet()};
@@ -919,7 +956,7 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
reg_pair[pair_idx] = LiftoffRegister::from_external_code(
- rc, lowered_type, loc.AsRegister());
+ rc, lowered_kind, loc.AsRegister());
} else {
DCHECK(loc.IsCallerFrameSlot());
reg_pair[pair_idx] = GetUnusedRegister(rc, pinned);
@@ -927,16 +964,16 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
int offset = call_descriptor->GetOffsetToReturns();
int return_slot = -loc.GetLocation() - offset - 1;
LoadReturnStackSlot(reg_pair[pair_idx],
- return_slot * kSystemPointerSize, lowered_type);
+ return_slot * kSystemPointerSize, lowered_kind);
}
if (pair_idx == 0) {
pinned.set(reg_pair[0]);
}
}
if (num_lowered_params == 1) {
- PushRegister(return_type, reg_pair[0]);
+ PushRegister(return_kind, reg_pair[0]);
} else {
- PushRegister(return_type, LiftoffRegister::ForPair(reg_pair[0].gp(),
+ PushRegister(return_kind, LiftoffRegister::ForPair(reg_pair[0].gp(),
reg_pair[1].gp()));
}
}
@@ -945,21 +982,21 @@ void LiftoffAssembler::FinishCall(const FunctionSig* sig,
}
void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_EQ(dst.reg_class(), src.reg_class());
DCHECK_NE(dst, src);
if (kNeedI64RegPair && dst.is_gp_pair()) {
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
- StackTransferRecipe(this).MoveRegister(dst, src, type);
+ StackTransferRecipe(this).MoveRegister(dst, src, kind);
} else if (kNeedS128RegPair && dst.is_fp_pair()) {
- // Calling low_fp is fine, Move will automatically check the type and
+ // Calling low_fp is fine, Move will automatically check the kind and
// convert this FP to its SIMD register, and use a SIMD move.
- Move(dst.low_fp(), src.low_fp(), type);
+ Move(dst.low_fp(), src.low_fp(), kind);
} else if (dst.is_gp()) {
- Move(dst.gp(), src.gp(), type);
+ Move(dst.gp(), src.gp(), kind);
} else {
- Move(dst.fp(), src.fp(), type);
+ Move(dst.fp(), src.fp(), kind);
}
}
@@ -968,7 +1005,7 @@ void LiftoffAssembler::ParallelRegisterMove(
StackTransferRecipe stack_transfers(this);
for (auto tuple : tuples) {
if (tuple.dst == tuple.src) continue;
- stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.type);
+ stack_transfers.MoveRegister(tuple.dst, tuple.src, tuple.kind);
}
}
@@ -976,19 +1013,19 @@ void LiftoffAssembler::MoveToReturnLocations(
const FunctionSig* sig, compiler::CallDescriptor* descriptor) {
StackTransferRecipe stack_transfers(this);
if (sig->return_count() == 1) {
- ValueType return_type = sig->GetReturn(0);
- // Defaults to a gp reg, will be set below if return type is not gp.
+ ValueKind return_kind = sig->GetReturn(0).kind();
+ // Defaults to a gp reg, will be set below if return kind is not gp.
LiftoffRegister return_reg = LiftoffRegister(kGpReturnRegisters[0]);
- if (needs_gp_reg_pair(return_type)) {
+ if (needs_gp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForPair(kGpReturnRegisters[0],
kGpReturnRegisters[1]);
- } else if (needs_fp_reg_pair(return_type)) {
+ } else if (needs_fp_reg_pair(return_kind)) {
return_reg = LiftoffRegister::ForFpPair(kFpReturnRegisters[0]);
- } else if (reg_class_for(return_type) == kFpReg) {
+ } else if (reg_class_for(return_kind) == kFpReg) {
return_reg = LiftoffRegister(kFpReturnRegisters[0]);
} else {
- DCHECK_EQ(kGpReg, reg_class_for(return_type));
+ DCHECK_EQ(kGpReg, reg_class_for(return_kind));
}
stack_transfers.LoadIntoRegister(return_reg,
cache_state_.stack_state.back(),
@@ -1003,8 +1040,8 @@ void LiftoffAssembler::MoveToReturnLocations(
// Fill return frame slots first to ensure that all potential spills happen
// before we prepare the stack transfers.
for (size_t i = 0; i < sig->return_count(); ++i) {
- ValueType return_type = sig->GetReturn(i);
- bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ ValueKind return_kind = sig->GetReturn(i).kind();
+ bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
compiler::LinkageLocation loc =
@@ -1015,16 +1052,16 @@ void LiftoffAssembler::MoveToReturnLocations(
LiftoffRegister reg = needs_gp_pair
? LoadI64HalfIntoRegister(slot, half)
: LoadToRegister(slot, {});
- ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_type);
+ ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ StoreCallerFrameSlot(reg, -loc.AsCallerFrameSlot(), lowered_kind);
}
}
}
// Prepare and execute stack transfers.
call_desc_return_idx = 0;
for (size_t i = 0; i < sig->return_count(); ++i) {
- ValueType return_type = sig->GetReturn(i);
- bool needs_gp_pair = needs_gp_reg_pair(return_type);
+ ValueKind return_kind = sig->GetReturn(i).kind();
+ bool needs_gp_pair = needs_gp_reg_pair(return_kind);
int num_lowered_params = 1 + needs_gp_pair;
for (int pair_idx = 0; pair_idx < num_lowered_params; ++pair_idx) {
RegPairHalf half = pair_idx == 0 ? kLowWord : kHighWord;
@@ -1033,10 +1070,10 @@ void LiftoffAssembler::MoveToReturnLocations(
if (loc.IsRegister()) {
DCHECK(!loc.IsAnyRegister());
int reg_code = loc.AsRegister();
- ValueType lowered_type = needs_gp_pair ? kWasmI32 : return_type;
- RegClass rc = reg_class_for(lowered_type);
+ ValueKind lowered_kind = needs_gp_pair ? kI32 : return_kind;
+ RegClass rc = reg_class_for(lowered_kind);
LiftoffRegister reg =
- LiftoffRegister::from_external_code(rc, return_type, reg_code);
+ LiftoffRegister::from_external_code(rc, return_kind, reg_code);
VarState& slot = slots[i];
if (needs_gp_pair) {
stack_transfers.LoadI64HalfIntoRegister(reg, slot, slot.offset(),
@@ -1064,6 +1101,14 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
used_regs.set(reg);
}
+ if (cache_state_.cached_instance != no_reg) {
+ DCHECK(!used_regs.has(cache_state_.cached_instance));
+ int liftoff_code =
+ LiftoffRegister{cache_state_.cached_instance}.liftoff_code();
+ used_regs.set(cache_state_.cached_instance);
+ DCHECK_EQ(0, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 1;
+ }
bool valid = memcmp(register_use_count, cache_state_.register_use_count,
sizeof(register_use_count)) == 0 &&
used_regs == cache_state_.used_registers;
@@ -1079,10 +1124,9 @@ bool LiftoffAssembler::ValidateCacheState() const {
}
#endif
-LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned) {
+LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) {
// Spill one cached value to free a register.
- LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates, pinned);
+ LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates);
SpillRegister(spill_reg);
return spill_reg;
}
@@ -1114,7 +1158,7 @@ LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters(
// b. If used, spill it.
// We spill one register in 2 and 3a, and two registers in 3b.
- LiftoffRegister first_reg = GetUnusedRegister(kFpCacheRegList, pinned);
+ LiftoffRegister first_reg = GetUnusedRegister(kFpReg, pinned);
LiftoffRegister second_reg = first_reg, low_reg = first_reg;
if (first_reg.fp().code() % 2 == 0) {
@@ -1148,7 +1192,7 @@ void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {
cache_state_.last_spilled_regs.set(slot->reg().low());
cache_state_.last_spilled_regs.set(slot->reg().high());
}
- Spill(slot->offset(), slot->reg(), slot->type());
+ Spill(slot->offset(), slot->reg(), slot->kind());
slot->MakeStack();
if (--remaining_uses == 0) break;
}
@@ -1160,14 +1204,14 @@ void LiftoffAssembler::set_num_locals(uint32_t num_locals) {
DCHECK_EQ(0, num_locals_); // only call this once.
num_locals_ = num_locals;
if (num_locals > kInlineLocalTypes) {
- more_local_types_ = reinterpret_cast<ValueType*>(
- base::Malloc(num_locals * sizeof(ValueType)));
+ more_local_types_ = reinterpret_cast<ValueKind*>(
+ base::Malloc(num_locals * sizeof(ValueKind)));
DCHECK_NOT_NULL(more_local_types_);
}
}
std::ostream& operator<<(std::ostream& os, VarState slot) {
- os << slot.type().name() << ":";
+ os << name(slot.kind()) << ":";
switch (slot.loc()) {
case VarState::kStack:
return os << "s";
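
Several hunks above (FinishCall, MoveToReturnLocations) lower an i64 value to a pair of i32 halves when kNeedI64RegPair is set, moving the kLowWord and kHighWord halves through separate gp registers and recombining them on the other side. A standalone sketch of that lowering (the helper names are illustrative):

#include <cassert>
#include <cstdint>

// Which half of a 64-bit value a 32-bit register holds on a 32-bit target.
enum class PairHalf { kLowWord, kHighWord };

// Extract one 32-bit half, as the pair lowering does when an i64 travels
// through two gp registers.
uint32_t GetHalf(uint64_t value, PairHalf half) {
  return half == PairHalf::kLowWord ? static_cast<uint32_t>(value)
                                    : static_cast<uint32_t>(value >> 32);
}

// Recombine the halves, as the consumer of the register pair sees the value.
uint64_t CombineHalves(uint32_t low, uint32_t high) {
  return (static_cast<uint64_t>(high) << 32) | low;
}

int main() {
  uint64_t v = 0x1122334455667788ull;
  uint32_t low = GetHalf(v, PairHalf::kLowWord);
  uint32_t high = GetHalf(v, PairHalf::kHighWord);
  assert(low == 0x55667788u && high == 0x11223344u);
  assert(CombineHalves(low, high) == v);
}
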
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 94f91ab0fd..13c0d45c1e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -73,25 +73,26 @@ class LiftoffAssembler : public TurboAssembler {
// Each slot in our stack frame currently has exactly 8 bytes.
static constexpr int kStackSlotSize = 8;
- static constexpr ValueType kWasmIntPtr =
- kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
+ static constexpr ValueKind kIntPtr = kSystemPointerSize == 8 ? kI64 : kI32;
+
+ using ValueKindSig = Signature<ValueKind>;
class VarState {
public:
enum Location : uint8_t { kStack, kRegister, kIntConst };
- explicit VarState(ValueType type, int offset)
- : loc_(kStack), type_(type), spill_offset_(offset) {}
- explicit VarState(ValueType type, LiftoffRegister r, int offset)
- : loc_(kRegister), type_(type), reg_(r), spill_offset_(offset) {
- DCHECK_EQ(r.reg_class(), reg_class_for(type));
+ explicit VarState(ValueKind kind, int offset)
+ : loc_(kStack), kind_(kind), spill_offset_(offset) {}
+ explicit VarState(ValueKind kind, LiftoffRegister r, int offset)
+ : loc_(kRegister), kind_(kind), reg_(r), spill_offset_(offset) {
+ DCHECK_EQ(r.reg_class(), reg_class_for(kind));
}
- explicit VarState(ValueType type, int32_t i32_const, int offset)
+ explicit VarState(ValueKind kind, int32_t i32_const, int offset)
: loc_(kIntConst),
- type_(type),
+ kind_(kind),
i32_const_(i32_const),
spill_offset_(offset) {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
}
bool is_stack() const { return loc_ == kStack; }
@@ -100,7 +101,7 @@ class LiftoffAssembler : public TurboAssembler {
bool is_reg() const { return loc_ == kRegister; }
bool is_const() const { return loc_ == kIntConst; }
- ValueType type() const { return type_; }
+ ValueKind kind() const { return kind_; }
Location loc() const { return loc_; }
@@ -109,10 +110,10 @@ class LiftoffAssembler : public TurboAssembler {
return i32_const_;
}
WasmValue constant() const {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
DCHECK_EQ(loc_, kIntConst);
- return type_ == kWasmI32 ? WasmValue(i32_const_)
- : WasmValue(int64_t{i32_const_});
+ return kind_ == kI32 ? WasmValue(i32_const_)
+ : WasmValue(int64_t{i32_const_});
}
int offset() const { return spill_offset_; }
@@ -133,7 +134,7 @@ class LiftoffAssembler : public TurboAssembler {
}
void MakeConstant(int32_t i32_const) {
- DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
+ DCHECK(kind_ == kI32 || kind_ == kI64);
loc_ = kIntConst;
i32_const_ = i32_const;
}
@@ -142,7 +143,7 @@ class LiftoffAssembler : public TurboAssembler {
// from different stack states.
void Copy(VarState src) {
loc_ = src.loc();
- type_ = src.type();
+ kind_ = src.kind();
if (loc_ == kRegister) {
reg_ = src.reg();
} else if (loc_ == kIntConst) {
@@ -154,7 +155,7 @@ class LiftoffAssembler : public TurboAssembler {
Location loc_;
// TODO(wasm): This is redundant, the decoder already knows the type of each
// stack value. Try to collapse.
- ValueType type_;
+ ValueKind kind_;
union {
LiftoffRegister reg_; // used if loc_ == kRegister
@@ -192,6 +193,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegList used_registers;
uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
LiftoffRegList last_spilled_regs;
+ Register cached_instance = no_reg;
bool has_unused_register(RegClass rc, LiftoffRegList pinned = {}) const {
if (kNeedI64RegPair && rc == kGpRegPair) {
@@ -205,13 +207,11 @@ class LiftoffAssembler : public TurboAssembler {
}
DCHECK(rc == kGpReg || rc == kFpReg);
LiftoffRegList candidates = GetCacheRegList(rc);
- return has_unused_register(candidates, pinned);
+ return has_unused_register(candidates.MaskOut(pinned));
}
- bool has_unused_register(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) const {
- LiftoffRegList available_regs =
- candidates.MaskOut(used_registers).MaskOut(pinned);
+ bool has_unused_register(LiftoffRegList candidates) const {
+ LiftoffRegList available_regs = candidates.MaskOut(used_registers);
return !available_regs.is_empty();
}
@@ -241,6 +241,52 @@ class LiftoffAssembler : public TurboAssembler {
return available_regs.GetFirstRegSet();
}
+ // Volatile registers are registers which are used for caching values that
+ // can easily be reloaded. Those are returned first if we run out of free
+ // registers.
+ // Note: This interface is a bit more generic than currently needed, in
+ // anticipation of more "volatile registers" being added later.
+ bool has_volatile_register(LiftoffRegList candidates) {
+ return cached_instance != no_reg && candidates.has(cached_instance);
+ }
+
+ LiftoffRegister take_volatile_register(LiftoffRegList candidates) {
+ DCHECK(candidates.has(cached_instance));
+ LiftoffRegister ret{cached_instance};
+ DCHECK_EQ(1, register_use_count[ret.liftoff_code()]);
+ register_use_count[ret.liftoff_code()] = 0;
+ used_registers.clear(ret);
+ cached_instance = no_reg;
+ return ret;
+ }
+
+ void SetInstanceCacheRegister(Register reg) {
+ DCHECK_EQ(no_reg, cached_instance);
+ cached_instance = reg;
+ int liftoff_code = LiftoffRegister{reg}.liftoff_code();
+ DCHECK_EQ(0, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 1;
+ used_registers.set(reg);
+ }
+
+ Register TrySetCachedInstanceRegister(LiftoffRegList pinned) {
+ DCHECK_EQ(no_reg, cached_instance);
+ LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+ if (!has_unused_register(candidates)) return no_reg;
+ SetInstanceCacheRegister(unused_register(candidates).gp());
+ DCHECK_NE(no_reg, cached_instance);
+ return cached_instance;
+ }
+
+ void ClearCachedInstanceRegister() {
+ if (cached_instance == no_reg) return;
+ int liftoff_code = LiftoffRegister{cached_instance}.liftoff_code();
+ DCHECK_EQ(1, register_use_count[liftoff_code]);
+ register_use_count[liftoff_code] = 0;
+ used_registers.clear(cached_instance);
+ cached_instance = no_reg;
+ }
+
void inc_used(LiftoffRegister reg) {
if (reg.is_pair()) {
inc_used(reg.low());
@@ -294,15 +340,13 @@ class LiftoffAssembler : public TurboAssembler {
memset(register_use_count, 0, sizeof(register_use_count));
}
- LiftoffRegister GetNextSpillReg(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) {
- LiftoffRegList unpinned = candidates.MaskOut(pinned);
- DCHECK(!unpinned.is_empty());
+ LiftoffRegister GetNextSpillReg(LiftoffRegList candidates) {
+ DCHECK(!candidates.is_empty());
// This method should only be called if none of the candidates is free.
- DCHECK(unpinned.MaskOut(used_registers).is_empty());
- LiftoffRegList unspilled = unpinned.MaskOut(last_spilled_regs);
+ DCHECK(candidates.MaskOut(used_registers).is_empty());
+ LiftoffRegList unspilled = candidates.MaskOut(last_spilled_regs);
if (unspilled.is_empty()) {
- unspilled = unpinned;
+ unspilled = candidates;
last_spilled_regs = {};
}
LiftoffRegister reg = unspilled.GetFirstRegSet();
@@ -345,13 +389,13 @@ class LiftoffAssembler : public TurboAssembler {
// Use this to pop a value into a register that has no other uses, so it
// can be modified.
LiftoffRegister PopToModifiableRegister(LiftoffRegList pinned = {}) {
- ValueType type = cache_state_.stack_state.back().type();
+ ValueKind kind = cache_state_.stack_state.back().kind();
LiftoffRegister reg = PopToRegister(pinned);
if (cache_state()->is_free(reg)) return reg;
pinned.set(reg);
LiftoffRegister new_reg = GetUnusedRegister(reg.reg_class(), pinned);
- Move(new_reg, reg, type);
+ Move(new_reg, reg, kind);
return new_reg;
}
@@ -370,10 +414,10 @@ class LiftoffAssembler : public TurboAssembler {
// stack, so that we can merge different values on the back-edge.
void PrepareLoopArgs(int num);
- int NextSpillOffset(ValueType type) {
- int offset = TopSpillOffset() + SlotSizeForType(type);
- if (NeedsAlignment(type)) {
- offset = RoundUp(offset, SlotSizeForType(type));
+ int NextSpillOffset(ValueKind kind) {
+ int offset = TopSpillOffset() + SlotSizeForType(kind);
+ if (NeedsAlignment(kind)) {
+ offset = RoundUp(offset, SlotSizeForType(kind));
}
return offset;
}
@@ -384,25 +428,25 @@ class LiftoffAssembler : public TurboAssembler {
: cache_state_.stack_state.back().offset();
}
- void PushRegister(ValueType type, LiftoffRegister reg) {
- DCHECK_EQ(reg_class_for(type), reg.reg_class());
+ void PushRegister(ValueKind kind, LiftoffRegister reg) {
+ DCHECK_EQ(reg_class_for(kind), reg.reg_class());
cache_state_.inc_used(reg);
- cache_state_.stack_state.emplace_back(type, reg, NextSpillOffset(type));
+ cache_state_.stack_state.emplace_back(kind, reg, NextSpillOffset(kind));
}
- void PushConstant(ValueType type, int32_t i32_const) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
- cache_state_.stack_state.emplace_back(type, i32_const,
- NextSpillOffset(type));
+ void PushConstant(ValueKind kind, int32_t i32_const) {
+ DCHECK(kind == kI32 || kind == kI64);
+ cache_state_.stack_state.emplace_back(kind, i32_const,
+ NextSpillOffset(kind));
}
- void PushStack(ValueType type) {
- cache_state_.stack_state.emplace_back(type, NextSpillOffset(type));
+ void PushStack(ValueKind kind) {
+ cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind));
}
void SpillRegister(LiftoffRegister);
- uint32_t GetNumUses(LiftoffRegister reg) {
+ uint32_t GetNumUses(LiftoffRegister reg) const {
return cache_state_.get_use_count(reg);
}
@@ -421,9 +465,9 @@ class LiftoffAssembler : public TurboAssembler {
// Get an unused register for class {rc}, potentially spilling to free one.
LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
- LiftoffRegList candidates = kGpCacheRegList;
- Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
- Register high = GetUnusedRegister(candidates, pinned).gp();
+ LiftoffRegList candidates = kGpCacheRegList.MaskOut(pinned);
+ Register low = candidates.clear(GetUnusedRegister(candidates)).gp();
+ Register high = GetUnusedRegister(candidates).gp();
return LiftoffRegister::ForPair(low, high);
} else if (kNeedS128RegPair && rc == kFpRegPair) {
// kFpRegPair specific logic here because we need adjacent registers, not
@@ -435,23 +479,26 @@ class LiftoffAssembler : public TurboAssembler {
return LiftoffRegister::ForFpPair(low_fp);
}
DCHECK(rc == kGpReg || rc == kFpReg);
- LiftoffRegList candidates = GetCacheRegList(rc);
- return GetUnusedRegister(candidates, pinned);
+ LiftoffRegList candidates = GetCacheRegList(rc).MaskOut(pinned);
+ return GetUnusedRegister(candidates);
}
// Get an unused register of {candidates}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(LiftoffRegList candidates,
- LiftoffRegList pinned = {}) {
- if (cache_state_.has_unused_register(candidates, pinned)) {
- return cache_state_.unused_register(candidates, pinned);
+ LiftoffRegister GetUnusedRegister(LiftoffRegList candidates) {
+ DCHECK(!candidates.is_empty());
+ if (cache_state_.has_unused_register(candidates)) {
+ return cache_state_.unused_register(candidates);
}
- return SpillOneRegister(candidates, pinned);
+ if (cache_state_.has_volatile_register(candidates)) {
+ return cache_state_.take_volatile_register(candidates);
+ }
+ return SpillOneRegister(candidates);
}
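
GetUnusedRegister above now follows a three-step policy: hand out a genuinely free register, otherwise reclaim the "volatile" cached-instance register (cheap, because the instance can be reloaded from its spill slot), and only then spill a live value. A minimal sketch of that priority order over a toy register file (AllocatorSketch is a hypothetical model, not V8's allocator):

#include <bitset>
#include <cassert>
#include <iostream>

constexpr int kNumRegs = 8;
using Reg = int;
constexpr Reg kNoReg = -1;

struct AllocatorSketch {
  std::bitset<kNumRegs> used;    // registers holding live values
  Reg cached_instance = kNoReg;  // "volatile": reloadable, so reclaimable

  Reg GetUnusedRegister() {
    // 1. Prefer a register that is completely free.
    for (Reg r = 0; r < kNumRegs; ++r) {
      if (!used.test(r)) return r;
    }
    // 2. Otherwise reclaim the volatile cache register; its value can be
    //    reloaded later, so no spill code is needed.
    if (cached_instance != kNoReg) {
      Reg r = cached_instance;
      cached_instance = kNoReg;
      used.reset(r);
      return r;
    }
    // 3. Last resort: spill. A real implementation emits a store and picks
    //    a victim heuristically; here we just report and reuse r0.
    std::cout << "spilling r0\n";
    used.reset(0);
    return 0;
  }
};

int main() {
  AllocatorSketch a;
  a.used.set();           // every register holds a live value...
  a.cached_instance = 5;  // ...but r5 only caches the instance
  Reg got = a.GetUnusedRegister();
  assert(got == 5);       // the cache is given up before anything is spilled
}

The invariant established by SetInstanceCacheRegister above (a use count of exactly one for the cache register) is what lets the real allocator tell step 2 apart from step 1 and step 3.
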
void MaterializeMergedConstants(uint32_t arity);
- void MergeFullStackWith(const CacheState& target, const CacheState& source);
- void MergeStackWith(const CacheState& target, uint32_t arity);
+ void MergeFullStackWith(CacheState& target, const CacheState& source);
+ void MergeStackWith(CacheState& target, uint32_t arity);
void Spill(VarState* slot);
void SpillLocals();
@@ -469,7 +516,12 @@ class LiftoffAssembler : public TurboAssembler {
template <typename... Regs>
void SpillRegisters(Regs... regs) {
for (LiftoffRegister r : {LiftoffRegister(regs)...}) {
- if (cache_state()->is_used(r)) SpillRegister(r);
+ if (cache_state_.is_free(r)) continue;
+ if (r.is_gp() && cache_state_.cached_instance == r.gp()) {
+ cache_state_.ClearCachedInstanceRegister();
+ } else {
+ SpillRegister(r);
+ }
}
}
@@ -484,32 +536,32 @@ class LiftoffAssembler : public TurboAssembler {
}
// Load parameters into the right registers / stack slots for the call.
- void PrepareBuiltinCall(const FunctionSig* sig,
+ void PrepareBuiltinCall(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
std::initializer_list<VarState> params);
// Load parameters into the right registers / stack slots for the call.
// Move {*target} into another register if needed and update {*target} to that
// register, or {no_reg} if target was spilled to the stack.
- void PrepareCall(const FunctionSig*, compiler::CallDescriptor*,
+ void PrepareCall(const ValueKindSig*, compiler::CallDescriptor*,
Register* target = nullptr,
Register* target_instance = nullptr);
// Process return values of the call.
- void FinishCall(const FunctionSig*, compiler::CallDescriptor*);
+ void FinishCall(const ValueKindSig*, compiler::CallDescriptor*);
// Move {src} into {dst}. {src} and {dst} must be different.
- void Move(LiftoffRegister dst, LiftoffRegister src, ValueType);
+ void Move(LiftoffRegister dst, LiftoffRegister src, ValueKind);
- // Parallel register move: For a list of tuples <dst, src, type>, move the
- // {src} register of type {type} into {dst}. If {src} equals {dst}, ignore
+ // Parallel register move: For a list of tuples <dst, src, kind>, move the
+ // {src} register of kind {kind} into {dst}. If {src} equals {dst}, ignore
// that tuple.
struct ParallelRegisterMoveTuple {
LiftoffRegister dst;
LiftoffRegister src;
- ValueType type;
+ ValueKind kind;
template <typename Dst, typename Src>
- ParallelRegisterMoveTuple(Dst dst, Src src, ValueType type)
- : dst(dst), src(src), type(type) {}
+ ParallelRegisterMoveTuple(Dst dst, Src src, ValueKind kind)
+ : dst(dst), src(src), kind(kind) {}
};
void ParallelRegisterMove(Vector<const ParallelRegisterMoveTuple>);
@@ -543,33 +595,45 @@ class LiftoffAssembler : public TurboAssembler {
inline void FinishCode();
inline void AbortCompilation();
inline static constexpr int StaticStackFrameSize();
- inline static int SlotSizeForType(ValueType type);
- inline static bool NeedsAlignment(ValueType type);
+ inline static int SlotSizeForType(ValueKind kind);
+ inline static bool NeedsAlignment(ValueKind kind);
inline void LoadConstant(LiftoffRegister, WasmValue,
RelocInfo::Mode rmode = RelocInfo::NONE);
- inline void LoadFromInstance(Register dst, int offset, int size);
- inline void LoadTaggedPointerFromInstance(Register dst, int offset);
+ inline void LoadInstanceFromFrame(Register dst);
+ inline void LoadFromInstance(Register dst, Register instance, int offset,
+ int size);
+ inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
+ int offset);
inline void SpillInstance(Register instance);
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
LiftoffRegList pinned);
+ enum SkipWriteBarrier : bool {
+ kSkipWriteBarrier = true,
+ kNoSkipWriteBarrier = false
+ };
inline void StoreTaggedPointer(Register dst_addr, Register offset_reg,
int32_t offset_imm, LiftoffRegister src,
- LiftoffRegList pinned);
+ LiftoffRegList pinned,
+ SkipWriteBarrier = kNoSkipWriteBarrier);
inline void LoadFixedArrayLengthAsInt32(LiftoffRegister dst, Register array,
LiftoffRegList pinned) {
int offset = FixedArray::kLengthOffset - kHeapObjectTag;
+ LoadTaggedSignedAsInt32(dst, array, offset, pinned);
+ }
+ inline void LoadTaggedSignedAsInt32(LiftoffRegister dst, Register src_addr,
+ int32_t offset, LiftoffRegList pinned) {
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
DCHECK_EQ(kSmiShiftSize + kSmiTagSize, 4 * kBitsPerByte);
offset += 4;
#endif
- Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
} else {
DCHECK(SmiValuesAre31Bits());
- Load(dst, array, no_reg, offset, LoadType::kI32Load, pinned);
+ Load(dst, src_addr, no_reg, offset, LoadType::kI32Load, pinned);
emit_i32_sari(dst.gp(), dst.gp(), kSmiTagSize);
}
}
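
LoadTaggedSignedAsInt32 above exploits the Smi representation: with 32-bit Smis the payload occupies the upper half of the tagged word, so on little-endian targets a plain 32-bit load at offset + 4 is enough, while with 31-bit Smis the loaded word is arithmetically shifted right by the tag size. A host-side sketch of the two decodings, assuming a little-endian host for the first path (the Encode/Decode helpers are illustrative, not V8 constants):

#include <cassert>
#include <cstdint>
#include <cstring>

// 32-bit-Smi layout: the payload sits in the upper half of a 64-bit tagged
// word, so on a little-endian host a plain 4-byte read at byte offset 4
// recovers it (that is what the `offset += 4` above models).
int32_t DecodeSmi32(uint64_t tagged) {
  int32_t value;
  std::memcpy(&value, reinterpret_cast<const char*>(&tagged) + 4,
              sizeof(value));
  return value;
}

// 31-bit-Smi layout: the payload is stored shifted left by the one-bit tag,
// so an arithmetic shift right recovers it (the emit_i32_sari above).
int32_t DecodeSmi31(int32_t tagged) {
  return tagged >> 1;  // assumes arithmetic shift for negative values
}

// Helpers to build tagged words for the checks below.
uint64_t EncodeSmi32(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}
int32_t EncodeSmi31(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int main() {
  assert(DecodeSmi32(EncodeSmi32(42)) == 42);
  assert(DecodeSmi32(EncodeSmi32(-7)) == -7);
  assert(DecodeSmi31(EncodeSmi31(42)) == 42);
  assert(DecodeSmi31(EncodeSmi31(-7)) == -7);
}
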
@@ -622,19 +686,19 @@ class LiftoffAssembler : public TurboAssembler {
inline void AtomicFence();
inline void LoadCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
- ValueType);
+ ValueKind);
inline void StoreCallerFrameSlot(LiftoffRegister, uint32_t caller_slot_idx,
- ValueType);
- inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueType);
+ ValueKind);
+ inline void LoadReturnStackSlot(LiftoffRegister, int offset, ValueKind);
inline void MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType);
+ ValueKind);
- inline void Move(Register dst, Register src, ValueType);
- inline void Move(DoubleRegister dst, DoubleRegister src, ValueType);
+ inline void Move(Register dst, Register src, ValueKind);
+ inline void Move(DoubleRegister dst, DoubleRegister src, ValueKind);
- inline void Spill(int offset, LiftoffRegister, ValueType);
+ inline void Spill(int offset, LiftoffRegister, ValueKind);
inline void Spill(int offset, WasmValue);
- inline void Fill(LiftoffRegister, int offset, ValueType);
+ inline void Fill(LiftoffRegister, int offset, ValueKind);
// Only used on 32-bit systems: Fill a register from a "half stack slot", i.e.
// 4 bytes on the stack holding half of a 64-bit value.
inline void FillI64Half(Register, int offset, RegPairHalf);
@@ -777,7 +841,7 @@ class LiftoffAssembler : public TurboAssembler {
emit_type_conversion(kExprI64UConvertI32, LiftoffRegister(dst),
LiftoffRegister(src));
} else if (dst != src) {
- Move(dst, src, kWasmI32);
+ Move(dst, src, kI32);
}
}
@@ -843,7 +907,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_jump(Label*);
inline void emit_jump(Register);
- inline void emit_cond_jump(LiftoffCondition, Label*, ValueType value,
+ inline void emit_cond_jump(LiftoffCondition, Label*, ValueKind value,
Register lhs, Register rhs = no_reg);
inline void emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, Label* label,
Register lhs, int imm);
@@ -863,7 +927,7 @@ class LiftoffAssembler : public TurboAssembler {
// should be emitted instead.
inline bool emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
- LiftoffRegister false_value, ValueType type);
+ LiftoffRegister false_value, ValueKind kind);
enum SmiCheckMode { kJumpOnSmi, kJumpOnNotSmi };
inline void emit_smi_check(Register obj, Label* target, SmiCheckMode mode);
@@ -875,11 +939,15 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadLane(LiftoffRegister dst, LiftoffRegister src, Register addr,
Register offset_reg, uintptr_t offset_imm, LoadType type,
uint8_t lane, uint32_t* protected_load_pc);
+ inline void StoreLane(Register dst, Register offset, uintptr_t offset_imm,
+ LiftoffRegister src, StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc);
inline void emit_i8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, const uint8_t shuffle[16],
bool is_swizzle);
inline void emit_i8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i8x16_popcnt(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
@@ -922,6 +990,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
@@ -949,7 +1025,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister mask);
inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v8x16_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v128_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -987,7 +1063,6 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v16x8_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1024,6 +1099,10 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
@@ -1036,8 +1115,10 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
+ inline void emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
- inline void emit_v32x4_anytrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1068,6 +1149,10 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i32x4_dot_i16x8_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2);
@@ -1081,6 +1166,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i64x2_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v64x2_alltrue(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1112,6 +1198,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister src1,
LiftoffRegister src2);
inline void emit_i64x2_bitmask(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_f32x4_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_neg(LiftoffRegister dst, LiftoffRegister src);
inline void emit_f32x4_sqrt(LiftoffRegister dst, LiftoffRegister src);
@@ -1158,6 +1252,18 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src);
inline void emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -1205,6 +1311,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_abs(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i64x2_abs(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx);
@@ -1261,18 +1368,18 @@ class LiftoffAssembler : public TurboAssembler {
inline void DropStackSlotsAndRet(uint32_t num_stack_slots);
// Execute a C call. Arguments are pushed to the stack and a pointer to this
- // region is passed to the C function. If {out_argument_type != kWasmStmt},
+ // region is passed to the C function. If {out_argument_kind != kStmt},
// this is the return value of the C function, stored in {rets[0]}. Further
// outputs (specified in {sig->returns()}) are read from the buffer and stored
// in the remaining {rets} registers.
- inline void CallC(const FunctionSig* sig, const LiftoffRegister* args,
- const LiftoffRegister* rets, ValueType out_argument_type,
+ inline void CallC(const ValueKindSig* sig, const LiftoffRegister* args,
+ const LiftoffRegister* rets, ValueKind out_argument_kind,
int stack_bytes, ExternalReference ext_ref);
inline void CallNativeWasmCode(Address addr);
inline void TailCallNativeWasmCode(Address addr);
// Indirect call: If {target == no_reg}, then pop the target from the stack.
- inline void CallIndirect(const FunctionSig* sig,
+ inline void CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target);
inline void TailCallIndirect(Register target);
@@ -1293,17 +1400,17 @@ class LiftoffAssembler : public TurboAssembler {
int GetTotalFrameSize() const { return max_used_spill_offset_; }
- ValueType local_type(uint32_t index) {
+ ValueKind local_type(uint32_t index) {
DCHECK_GT(num_locals_, index);
- ValueType* locals =
+ ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
return locals[index];
}
- void set_local_type(uint32_t index, ValueType type) {
- ValueType* locals =
+ void set_local_type(uint32_t index, ValueKind kind) {
+ ValueKind* locals =
num_locals_ <= kInlineLocalTypes ? local_types_ : more_local_types_;
- locals[index] = type;
+ locals[index] = kind;
}
CacheState* cache_state() { return &cache_state_; }
@@ -1325,13 +1432,13 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
- static constexpr uint32_t kInlineLocalTypes = 8;
+ static constexpr uint32_t kInlineLocalTypes = 16;
union {
- ValueType local_types_[kInlineLocalTypes];
- ValueType* more_local_types_;
+ ValueKind local_types_[kInlineLocalTypes];
+ ValueKind* more_local_types_;
};
- static_assert(sizeof(ValueType) == 4,
- "Reconsider this inlining if ValueType gets bigger");
+ static_assert(sizeof(ValueKind) == 1,
+ "Reconsider this inlining if ValueKind gets bigger");
CacheState cache_state_;
// The maximum spill offset for slots in the value stack.
int max_used_spill_offset_ = StaticStackFrameSize();
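
The static_assert change above is what makes the doubled kInlineLocalTypes cheap: each tracked kind shrinks from four bytes to one, so the inline capacity can grow from 8 to 16 while the union itself gets smaller, and only functions with many locals fall back to a heap allocation. A standalone sketch of that small-buffer layout (LocalKinds and KindSketch are simplified stand-ins):

#include <cassert>
#include <cstdint>
#include <cstdlib>

enum class KindSketch : uint8_t { kI32, kI64, kF32, kF64 };
static_assert(sizeof(KindSketch) == 1, "one byte per tracked kind");

class LocalKinds {
 public:
  ~LocalKinds() {
    if (count_ > kInline) std::free(heap_);
  }

  // Must be called exactly once, before set()/get().
  void set_count(uint32_t count) {
    count_ = count;
    if (count > kInline) {
      heap_ =
          static_cast<KindSketch*>(std::malloc(count * sizeof(KindSketch)));
    }
  }

  void set(uint32_t i, KindSketch k) { data()[i] = k; }
  KindSketch get(uint32_t i) const { return data()[i]; }

 private:
  static constexpr uint32_t kInline = 16;  // fits because kinds are one byte

  KindSketch* data() { return count_ <= kInline ? inline_ : heap_; }
  const KindSketch* data() const {
    return count_ <= kInline ? inline_ : heap_;
  }

  uint32_t count_ = 0;
  union {
    KindSketch inline_[kInline];  // common case: no allocation at all
    KindSketch* heap_;            // rare case: many locals
  };
};

int main() {
  LocalKinds locals;
  locals.set_count(3);  // stays inline
  locals.set(0, KindSketch::kI64);
  assert(locals.get(0) == KindSketch::kI64);

  LocalKinds many;
  many.set_count(100);  // falls back to the heap
  many.set(99, KindSketch::kF64);
  assert(many.get(99) == KindSketch::kF64);
}
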
@@ -1340,8 +1447,7 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
- V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
@@ -1378,7 +1484,7 @@ void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
(assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
- assm->Move(dst.low_gp(), tmp, kWasmI32);
+ assm->Move(dst.low_gp(), tmp, kI32);
}
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
@@ -1406,7 +1512,7 @@ void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs)).gp();
(assm->*op)(tmp, lhs.low_gp(), low_word);
(assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
- assm->Move(dst.low_gp(), tmp, kWasmI32);
+ assm->Move(dst.low_gp(), tmp, kI32);
}
} // namespace liftoff
@@ -1506,6 +1612,8 @@ class LiftoffStackSlots {
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h"
#else
#error Unsupported architecture.
#endif
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 9a42bbf50c..01264e4e38 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -62,15 +62,16 @@ struct assert_field_size {
#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
FIELD_SIZE(WasmInstanceObject::k##name##Offset)
-#define LOAD_INSTANCE_FIELD(dst, name, load_size) \
- __ LoadFromInstance(dst, WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
+#define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned) \
+ __ LoadFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
+ WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
load_size>::size);
-#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name) \
- static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
- "field in WasmInstance does not have the expected size"); \
- __ LoadTaggedPointerFromInstance(dst, \
+#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned) \
+ static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
+ "field in WasmInstance does not have the expected size"); \
+ __ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
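
The reworked LOAD_INSTANCE_FIELD macro still routes its load size through the assert_field_size helper named in the hunk header, so a mismatch between the declared load size and the instance field's actual size fails at compile time instead of producing a short or over-wide load. A minimal standalone version of that pattern (AssertFieldSize and InstanceSketch are illustrative):

#include <cstddef>
#include <cstdint>

// Compile-time guard: instantiating it forces expected == actual to hold
// wherever a field is loaded with an explicit size.
template <std::size_t expected_size, std::size_t actual_size>
struct AssertFieldSize {
  static_assert(expected_size == actual_size,
                "field size mismatch: update the load size or the field");
  static constexpr std::size_t size = actual_size;
};

struct InstanceSketch {
  uint64_t stack_limit_address;  // stand-in for a real instance field
};

// A load helper would be instantiated like this; a wrong size does not
// compile instead of silently reading the wrong number of bytes.
constexpr std::size_t kStackLimitLoadSize =
    AssertFieldSize<sizeof(uint64_t),
                    sizeof(InstanceSketch::stack_limit_address)>::size;

int main() {
  static_assert(kStackLimitLoadSize == 8, "checked entirely at compile time");
  // AssertFieldSize<4, sizeof(InstanceSketch::stack_limit_address)>::size
  // would trigger the static_assert above.
  return 0;
}
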
#ifdef DEBUG
@@ -85,8 +86,13 @@ struct assert_field_size {
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
-constexpr ValueType kPointerValueType =
- kSystemPointerSize == 8 ? kWasmI64 : kWasmI32;
+constexpr ValueKind kPointerValueType = kSystemPointerSize == 8 ? kI64 : kI32;
+
+#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
+constexpr ValueKind kSmiValueType = kI32;
+#else
+constexpr ValueKind kSmiValueType = kI64;
+#endif
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
@@ -156,6 +162,9 @@ constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
// Builds a {DebugSideTable}.
class DebugSideTableBuilder {
+ using Entry = DebugSideTable::Entry;
+ using Value = Entry::Value;
+
public:
enum AssumeSpilling {
// All register values will be spilled before the pc covered by the debug
@@ -170,12 +179,28 @@ class DebugSideTableBuilder {
class EntryBuilder {
public:
- explicit EntryBuilder(int pc_offset,
- std::vector<DebugSideTable::Entry::Value> values)
- : pc_offset_(pc_offset), values_(std::move(values)) {}
+ explicit EntryBuilder(int pc_offset, int stack_height,
+ std::vector<Value> changed_values)
+ : pc_offset_(pc_offset),
+ stack_height_(stack_height),
+ changed_values_(std::move(changed_values)) {}
+
+ Entry ToTableEntry() {
+ return Entry{pc_offset_, stack_height_, std::move(changed_values_)};
+ }
- DebugSideTable::Entry ToTableEntry() {
- return DebugSideTable::Entry{pc_offset_, std::move(values_)};
+ void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) {
+ auto dst = changed_values_.begin();
+ auto end = changed_values_.end();
+ for (auto src = dst; src != end; ++src) {
+ if (src->index < static_cast<int>(last_values.size()) &&
+ *src == last_values[src->index]) {
+ continue;
+ }
+ if (dst != src) *dst = *src;
+ ++dst;
+ }
+ changed_values_.erase(dst, end);
}
int pc_offset() const { return pc_offset_; }
@@ -183,67 +208,182 @@ class DebugSideTableBuilder {
private:
int pc_offset_;
- std::vector<DebugSideTable::Entry::Value> values_;
+ int stack_height_;
+ std::vector<Value> changed_values_;
};
- // Adds a new entry, and returns a pointer to a builder for modifying that
- // entry ({stack_height} includes {num_locals}).
- EntryBuilder* NewEntry(int pc_offset, int num_locals, int stack_height,
- LiftoffAssembler::VarState* stack_state,
- AssumeSpilling assume_spilling) {
- DCHECK_LE(num_locals, stack_height);
- // Record stack types.
- std::vector<DebugSideTable::Entry::Value> values(stack_height);
- for (int i = 0; i < stack_height; ++i) {
- const auto& slot = stack_state[i];
- values[i].type = slot.type();
- values[i].stack_offset = slot.offset();
+ // Adds a new entry in regular code.
+ void NewEntry(int pc_offset, Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ entries_.emplace_back(
+ pc_offset, static_cast<int>(stack_state.size()),
+ GetChangedStackValues(last_values_, stack_state, assume_spilling));
+ }
+
+ // Adds a new entry for OOL code, and returns a pointer to a builder for
+ // modifying that entry.
+ EntryBuilder* NewOOLEntry(Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ constexpr int kNoPcOffsetYet = -1;
+ ool_entries_.emplace_back(
+ kNoPcOffsetYet, static_cast<int>(stack_state.size()),
+ GetChangedStackValues(last_ool_values_, stack_state, assume_spilling));
+ return &ool_entries_.back();
+ }
+
+ void SetNumLocals(int num_locals) {
+ DCHECK_EQ(-1, num_locals_);
+ DCHECK_LE(0, num_locals);
+ num_locals_ = num_locals;
+ }
+
+ std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
+ DCHECK_LE(0, num_locals_);
+
+ // Connect {entries_} and {ool_entries_} by removing redundant stack
+ // information from the first {ool_entries_} entry (based on
+ // {last_values_}).
+ if (!entries_.empty() && !ool_entries_.empty()) {
+ ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_);
+ }
+
+ std::vector<Entry> entries;
+ entries.reserve(entries_.size() + ool_entries_.size());
+ for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
+ for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry());
+ DCHECK(std::is_sorted(
+ entries.begin(), entries.end(),
+ [](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); }));
+ return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
+ }
+
+ private:
+ static std::vector<Value> GetChangedStackValues(
+ std::vector<Value>& last_values,
+ Vector<LiftoffAssembler::VarState> stack_state,
+ AssumeSpilling assume_spilling) {
+ std::vector<Value> changed_values;
+ int old_stack_size = static_cast<int>(last_values.size());
+ last_values.resize(stack_state.size());
+
+ int index = 0;
+ for (const auto& slot : stack_state) {
+ Value new_value;
+ new_value.index = index;
+ new_value.kind = slot.kind();
switch (slot.loc()) {
case kIntConst:
- values[i].kind = DebugSideTable::Entry::kConstant;
- values[i].i32_const = slot.i32_const();
+ new_value.storage = Entry::kConstant;
+ new_value.i32_const = slot.i32_const();
break;
case kRegister:
DCHECK_NE(kDidSpill, assume_spilling);
if (assume_spilling == kAllowRegisters) {
- values[i].kind = DebugSideTable::Entry::kRegister;
- values[i].reg_code = slot.reg().liftoff_code();
+ new_value.storage = Entry::kRegister;
+ new_value.reg_code = slot.reg().liftoff_code();
break;
}
DCHECK_EQ(kAssumeSpilling, assume_spilling);
V8_FALLTHROUGH;
case kStack:
- values[i].kind = DebugSideTable::Entry::kStack;
- values[i].stack_offset = slot.offset();
+ new_value.storage = Entry::kStack;
+ new_value.stack_offset = slot.offset();
break;
}
+
+ if (index >= old_stack_size || last_values[index] != new_value) {
+ changed_values.push_back(new_value);
+ last_values[index] = new_value;
+ }
+ ++index;
}
- entries_.emplace_back(pc_offset, std::move(values));
- return &entries_.back();
+ return changed_values;
}
- void SetNumLocals(int num_locals) {
- DCHECK_EQ(-1, num_locals_);
- DCHECK_LE(0, num_locals);
- num_locals_ = num_locals;
+ int num_locals_ = -1;
+ // Keep a snapshot of the stack of the last entry, to generate a delta to the
+ // next entry.
+ std::vector<Value> last_values_;
+ std::vector<EntryBuilder> entries_;
+ // Keep OOL code entries separate so we can do proper delta-encoding (more
+ // entries might be added between the existing {entries_} and the
+ // {ool_entries_}). Store the entries in a list so the pointer is not
+ // invalidated by adding more entries.
+ std::vector<Value> last_ool_values_;
+ std::list<EntryBuilder> ool_entries_;
+};
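
The rewritten DebugSideTableBuilder stores per-safepoint deltas: GetChangedStackValues records only the stack values that differ from the previous entry, MinimizeBasedOnPreviousStack applies the same idea to the first OOL entry, and a consumer reconstructs the full stack by replaying entries in pc order. A standalone sketch of that delta encoding and its reconstruction (ValueSketch and EntrySketch are simplified stand-ins for the real Entry::Value and Entry):

#include <cassert>
#include <cstddef>
#include <vector>

struct ValueSketch {
  int index = 0;         // position in the value stack
  int stack_offset = 0;  // simplified "storage": just a spill offset
  bool operator==(const ValueSketch& o) const {
    return index == o.index && stack_offset == o.stack_offset;
  }
  bool operator!=(const ValueSketch& o) const { return !(*this == o); }
};

struct EntrySketch {
  int pc_offset = 0;
  int stack_height = 0;
  std::vector<ValueSketch> changed_values;  // delta against previous entry
};

// Producer side: update {last} in place and return only the values that are
// new or differ from the previous snapshot.
std::vector<ValueSketch> Delta(std::vector<ValueSketch>& last,
                               const std::vector<ValueSketch>& stack) {
  std::vector<ValueSketch> changed;
  int old_size = static_cast<int>(last.size());
  last.resize(stack.size());
  for (int i = 0; i < static_cast<int>(stack.size()); ++i) {
    if (i >= old_size || last[i] != stack[i]) {
      changed.push_back(stack[i]);
      last[i] = stack[i];
    }
  }
  return changed;
}

// Consumer side: replay deltas to recover the full stack at one entry.
std::vector<ValueSketch> Reconstruct(const std::vector<EntrySketch>& entries,
                                     std::size_t upto) {
  std::vector<ValueSketch> stack;
  for (std::size_t e = 0; e <= upto; ++e) {
    stack.resize(entries[e].stack_height);
    for (const ValueSketch& v : entries[e].changed_values) stack[v.index] = v;
  }
  return stack;
}

int main() {
  std::vector<ValueSketch> last;
  std::vector<EntrySketch> entries;
  entries.push_back({/*pc*/ 4, 2, Delta(last, {{0, 16}, {1, 24}})});
  entries.push_back({/*pc*/ 9, 2, Delta(last, {{0, 16}, {1, 32}})});
  // Only the slot that changed is stored in the second entry.
  assert(entries[1].changed_values.size() == 1);
  std::vector<ValueSketch> at_pc9 = Reconstruct(entries, 1);
  assert(at_pc9[0].stack_offset == 16 && at_pc9[1].stack_offset == 32);
}

Because only changes are stored, consecutive safepoints with an unchanged stack cost a few bytes each instead of re-describing every slot.
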
+
+void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
+ const CompilationEnv* env) {
+ // Decode errors are ok.
+ if (reason == kDecodeError) return;
+
+ // Missing CPU features are also generally OK for now.
+ if (reason == kMissingCPUFeature) return;
+
+ // --liftoff-only ensures that tests actually exercise the Liftoff path
+ // without bailing out. Bailing out due to (simulated) lack of CPU support
+ // is okay though (see above).
+ if (FLAG_liftoff_only) {
+ FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
}
- std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
- DCHECK_LE(0, num_locals_);
- std::vector<DebugSideTable::Entry> entries;
- entries.reserve(entries_.size());
- for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
- std::sort(entries.begin(), entries.end(),
- [](DebugSideTable::Entry& a, DebugSideTable::Entry& b) {
- return a.pc_offset() < b.pc_offset();
- });
- return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
+ // If --enable-testing-opcode-in-wasm is set, we are expected to bailout with
+ // "testing opcode".
+ if (FLAG_enable_testing_opcode_in_wasm &&
+ strcmp(detail, "testing opcode") == 0) {
+ return;
}
- private:
- int num_locals_ = -1;
- std::list<EntryBuilder> entries_;
-};
+ // Some externally maintained architectures don't fully implement Liftoff yet.
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ return;
+#endif
+
+ // TODO(11235): On arm and arm64 there is still a limit on the size of
+ // supported stack frames.
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+ if (strstr(detail, "Stack limited to 512 bytes")) return;
+#endif
+
+#define LIST_FEATURE(name, ...) kFeature_##name,
+ constexpr WasmFeatures kExperimentalFeatures{
+ FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
+ constexpr WasmFeatures kStagedFeatures{
+ FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
+#undef LIST_FEATURE
+
+ // Bailout is allowed if any experimental feature is enabled.
+ if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
+
+ // Staged features should be feature complete in Liftoff according to
+ // https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
+ // listed here explicitly, with a bug assigned to each of them.
+
+ // TODO(6020): Fully implement SIMD in Liftoff.
+ STATIC_ASSERT(kStagedFeatures.has_simd());
+ if (reason == kSimd) {
+ DCHECK(env->enabled_features.has_simd());
+ return;
+ }
+
+ // TODO(7581): Fully implement reftypes in Liftoff.
+ STATIC_ASSERT(kStagedFeatures.has_reftypes());
+ if (reason == kRefTypes) {
+ DCHECK(env->enabled_features.has_reftypes());
+ return;
+ }
+
+ // TODO(v8:8091): Implement exception handling in Liftoff.
+ if (reason == kExceptionHandling) {
+ DCHECK(env->enabled_features.has_eh());
+ return;
+ }
+
+ // Otherwise, bailout is not allowed.
+ FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
+}
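
CheckBailoutAllowed replaces the old single --liftoff-only check with an explicit allow-list: decode errors, missing CPU features, and a handful of tracked staged features may still bail out, anything else is treated as a bug in the baseline compiler. A compact sketch of that gating pattern (the reason names and flags below are hypothetical, not V8's):

#include <cstdio>
#include <cstdlib>

enum class BailoutReason {
  kDecodeError,
  kMissingCPUFeature,
  kSimd,  // staged feature with a known gap and a tracking bug
  kOtherReason,
};

// Returns true if compilation may legitimately fall back to another tier for
// this reason; otherwise the caller treats the bailout as a fatal bug.
bool BailoutAllowed(BailoutReason reason, bool experimental_features_enabled) {
  switch (reason) {
    case BailoutReason::kDecodeError:        // invalid module, not our bug
    case BailoutReason::kMissingCPUFeature:  // hardware simply lacks support
    case BailoutReason::kSimd:               // explicitly tracked gap
      return true;
    default:
      // Experimental features need not be supported by the baseline tier.
      return experimental_features_enabled;
  }
}

void CheckBailout(BailoutReason reason, const char* detail,
                  bool experimental_features_enabled) {
  if (BailoutAllowed(reason, experimental_features_enabled)) return;
  std::fprintf(stderr, "unexpected baseline bailout: %s\n", detail);
  std::abort();
}

int main() {
  CheckBailout(BailoutReason::kMissingCPUFeature, "no SIMD-128", false);  // ok
  CheckBailout(BailoutReason::kOtherReason, "anything", true);            // ok
  // CheckBailout(BailoutReason::kOtherReason, "bug", false);  // would abort
}
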
class LiftoffCompiler {
public:
@@ -252,12 +392,6 @@ class LiftoffCompiler {
using Value = ValueBase<validate>;
- static constexpr auto kI32 = ValueType::kI32;
- static constexpr auto kI64 = ValueType::kI64;
- static constexpr auto kF32 = ValueType::kF32;
- static constexpr auto kF64 = ValueType::kF64;
- static constexpr auto kS128 = ValueType::kS128;
-
struct ElseState {
MovableLabel label;
LiftoffAssembler::CacheState state;
@@ -276,6 +410,7 @@ class LiftoffCompiler {
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
+ using ValueKindSig = LiftoffAssembler::ValueKindSig;
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
@@ -283,7 +418,7 @@ class LiftoffCompiler {
struct Entry {
int offset;
LiftoffRegister reg;
- ValueType type;
+ ValueKind kind;
};
ZoneVector<Entry> entries;
@@ -352,7 +487,7 @@ class LiftoffCompiler {
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
- Vector<int> breakpoints = {}, int dead_breakpoint = 0)
+ Vector<const int> breakpoints = {}, int dead_breakpoint = 0)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
@@ -403,13 +538,7 @@ class LiftoffCompiler {
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
detail);
UnuseLabels(decoder);
- // --liftoff-only ensures that tests actually exercise the Liftoff path
- // without bailing out. Bailing out due to (simulated) lack of CPU support
- // is okay though.
- if (FLAG_liftoff_only && reason != kMissingCPUFeature) {
- FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s",
- detail);
- }
+ CheckBailoutAllowed(reason, detail, env_);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
@@ -418,37 +547,34 @@ class LiftoffCompiler {
return true;
}
- bool CheckSupportedType(FullDecoder* decoder, ValueType type,
+ bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
const char* context) {
LiftoffBailoutReason bailout_reason = kOtherReason;
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kI64:
- case ValueType::kF32:
- case ValueType::kF64:
+ switch (kind) {
+ case kI32:
+ case kI64:
+ case kF32:
+ case kF64:
return true;
- case ValueType::kS128:
+ case kS128:
if (CpuFeatures::SupportsWasmSimd128()) return true;
bailout_reason = kMissingCPUFeature;
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
- case ValueType::kI8:
- case ValueType::kI16:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI8:
+ case kI16:
if (FLAG_experimental_liftoff_extern_ref) return true;
- if (type.is_reference_to(HeapType::kExn)) {
- bailout_reason = kExceptionHandling;
- } else {
- bailout_reason = kRefTypes;
- }
+ bailout_reason = kRefTypes;
break;
- case ValueType::kBottom:
- case ValueType::kStmt:
+ case kBottom:
+ case kStmt:
UNREACHABLE();
}
EmbeddedVector<char, 128> buffer;
- SNPrintF(buffer, "%s %s", type.name().c_str(), context);
+ SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -479,27 +605,27 @@ class LiftoffCompiler {
int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
- ValueType type = decoder->local_type(i);
- __ set_local_type(i, type);
+ ValueKind kind = decoder->local_type(i).kind();
+ __ set_local_type(i, kind);
}
}
// Returns the number of inputs processed (1 or 2).
- uint32_t ProcessParameter(ValueType type, uint32_t input_idx) {
- const bool needs_pair = needs_gp_reg_pair(type);
- const ValueType reg_type = needs_pair ? kWasmI32 : type;
- const RegClass rc = reg_class_for(reg_type);
+ uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
+ const bool needs_pair = needs_gp_reg_pair(kind);
+ const ValueKind reg_kind = needs_pair ? kI32 : kind;
+ const RegClass rc = reg_class_for(reg_kind);
- auto LoadToReg = [this, reg_type, rc](compiler::LinkageLocation location,
+ auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
LiftoffRegList pinned) {
if (location.IsRegister()) {
DCHECK(!location.IsAnyRegister());
- return LiftoffRegister::from_external_code(rc, reg_type,
+ return LiftoffRegister::from_external_code(rc, reg_kind,
location.AsRegister());
}
DCHECK(location.IsCallerFrameSlot());
LiftoffRegister reg = __ GetUnusedRegister(rc, pinned);
- __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_type);
+ __ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
};
@@ -511,7 +637,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(reg));
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
}
- __ PushRegister(type, reg);
+ __ PushRegister(kind, reg);
return needs_pair ? 2 : 1;
}
@@ -536,11 +662,16 @@ class LiftoffCompiler {
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
position, regs_to_save, spilled_regs, safepoint_info,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
+ RegisterOOLDebugSideTableEntry()));
OutOfLineCode& ool = out_of_line_code_.back();
- LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
+ {});
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
+ // If the stack check triggers, we lose the cached instance register.
+ // TODO(clemensb): Restore that register in the OOL code so it's always
+ // available at the beginning of the actual function code.
+ __ cache_state()->ClearCachedInstanceRegister();
}
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
@@ -555,8 +686,8 @@ class LiftoffCompiler {
// because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- if (type != kWasmI32 && type != kWasmI64) return true;
+ ValueKind kind = __ local_type(param_idx);
+ if (kind != kI32 && kind != kI64) return true;
}
return false;
}
@@ -580,16 +711,6 @@ class LiftoffCompiler {
if (!CheckSupportedType(decoder, __ local_type(i), "param")) return;
}
- // Input 0 is the call target, the instance is at 1.
- constexpr int kInstanceParameterIndex = 1;
- // Store the instance parameter to a special stack slot.
- compiler::LinkageLocation instance_loc =
- descriptor_->GetInputLocation(kInstanceParameterIndex);
- DCHECK(instance_loc.IsRegister());
- DCHECK(!instance_loc.IsAnyRegister());
- Register instance_reg = Register::from_code(instance_loc.AsRegister());
- DCHECK_EQ(kWasmInstanceRegister, instance_reg);
-
// Parameter 0 is the instance parameter.
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
@@ -608,9 +729,19 @@ class LiftoffCompiler {
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
+ // Input 0 is the call target, the instance is at 1.
+ constexpr int kInstanceParameterIndex = 1;
+ // Check that {kWasmInstanceRegister} matches our call descriptor.
+ DCHECK_EQ(kWasmInstanceRegister,
+ Register::from_code(
+ descriptor_->GetInputLocation(kInstanceParameterIndex)
+ .AsRegister()));
+ // Store the instance parameter to a special stack slot.
+ __ SpillInstance(kWasmInstanceRegister);
+ __ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
+
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
- __ SpillInstance(instance_reg);
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
@@ -624,28 +755,32 @@ class LiftoffCompiler {
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- __ PushStack(type);
+ ValueKind kind = __ local_type(param_idx);
+ __ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size);
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- __ PushConstant(type, int32_t{0});
+ ValueKind kind = __ local_type(param_idx);
+ __ PushConstant(kind, int32_t{0});
}
}
if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
- for (uint32_t param_idx = num_params; param_idx < __ num_locals();
- ++param_idx) {
- ValueType type = decoder->local_type(param_idx);
- if (type.is_reference_type()) {
- LiftoffRegister result = __ GetUnusedRegister(kGpReg, {});
- LoadNullValue(result.gp(), {});
- __ Spill(__ cache_state()->stack_state.back().offset(), result, type);
+ Register null_ref_reg = no_reg;
+ for (uint32_t local_index = num_params; local_index < __ num_locals();
+ ++local_index) {
+ ValueKind kind = __ local_type(local_index);
+ if (is_reference_type(kind)) {
+ if (null_ref_reg == no_reg) {
+ null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
+ LoadNullValue(null_ref_reg, {});
+ }
+ __ Spill(__ cache_state()->stack_state[local_index].offset(),
+ LiftoffRegister(null_ref_reg), kind);
}
}
}
@@ -669,7 +804,7 @@ class LiftoffCompiler {
LiftoffRegister array_address =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
// Compute the correct offset in the array.
uint32_t offset =
@@ -692,9 +827,12 @@ class LiftoffCompiler {
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
// Unary "unequal" means "different from zero".
- __ emit_cond_jump(kUnequal, &no_tierup, kWasmI32,
- old_number_of_calls.gp());
+ __ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
TierUpFunction(decoder);
+ // After the runtime call, the instance cache register is clobbered (we
+ // reset it already in {SpillAllRegisters} above, but then we still access
+ // the instance afterwards).
+ __ cache_state()->ClearCachedInstanceRegister();
__ bind(&no_tierup);
}
@@ -735,15 +873,14 @@ class LiftoffCompiler {
__ PushRegisters(ool->regs_to_save);
} else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) {
- __ Spill(entry.offset, entry.reg, entry.type);
+ __ Spill(entry.offset, entry.reg, entry.kind);
}
}
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(
- &asm_, Safepoint::kNoLazyDeopt);
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
if (ool->safepoint_info) {
for (auto index : ool->safepoint_info->slots) {
@@ -774,7 +911,7 @@ class LiftoffCompiler {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) {
- __ Fill(entry.reg, entry.offset, entry.type);
+ __ Fill(entry.reg, entry.offset, entry.kind);
}
}
__ emit_jump(ool->continuation.get());
@@ -831,19 +968,29 @@ class LiftoffCompiler {
}
if (has_breakpoint) {
EmitBreakpoint(decoder);
- // Once we emitted a breakpoint, we don't need to check the "hook on
- // function call" any more.
- checked_hook_on_function_call_ = true;
- } else if (!checked_hook_on_function_call_) {
- checked_hook_on_function_call_ = true;
- // Check the "hook on function call" flag. If set, trigger a break.
- DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize);
+ // Once we emitted an unconditional breakpoint, we don't need to check
+ // function entry breaks any more.
+ did_function_entry_break_checks_ = true;
+ } else if (!did_function_entry_break_checks_) {
+ did_function_entry_break_checks_ = true;
+ DEBUG_CODE_COMMENT("check function entry break");
+ Label do_break;
Label no_break;
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
+
+ // Check the "hook on function call" flag. If set, trigger a break.
+ LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
+ {});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
+ // Unary "unequal" means "not equals zero".
+ __ emit_cond_jump(kUnequal, &do_break, kI32, flag);
+
+ // Check if we should stop on "script entry".
+ LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
// Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &no_break, kWasmI32, flag);
+ __ emit_cond_jump(kEqual, &no_break, kI32, flag);
+
+ __ bind(&do_break);
EmitBreakpoint(decoder);
__ bind(&no_break);
} else if (dead_breakpoint_ == decoder->position()) {
@@ -882,7 +1029,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallRuntimeStub(WasmCode::kWasmDebugBreak);
// TODO(ahaas): Define a proper safepoint here.
- safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ safepoint_table_builder_.DefineSafepoint(&asm_);
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAllowRegisters);
}
@@ -895,6 +1042,8 @@ class LiftoffCompiler {
// TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
+ // Same for the cached instance register.
+ __ cache_state()->ClearCachedInstanceRegister();
__ PrepareLoopArgs(loop->start_merge.arity);
@@ -939,8 +1088,7 @@ class LiftoffCompiler {
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
- __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kWasmI32,
- value);
+ __ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
@@ -1009,8 +1157,8 @@ class LiftoffCompiler {
void EndControl(FullDecoder* decoder, Control* c) {}
- void GenerateCCall(const LiftoffRegister* result_regs, const FunctionSig* sig,
- ValueType out_argument_type,
+ void GenerateCCall(const LiftoffRegister* result_regs,
+ const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
// Before making a call, spill all cache registers.
@@ -1018,14 +1166,13 @@ class LiftoffCompiler {
// Store arguments on our stack, then align the stack for calling to C.
int param_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- param_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ param_bytes += element_size_bytes(param_kind);
}
- int out_arg_bytes = out_argument_type == kWasmStmt
- ? 0
- : out_argument_type.element_size_bytes();
+ int out_arg_bytes =
+ out_argument_kind == kStmt ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
- __ CallC(sig, arg_regs, result_regs, out_argument_type, stack_bytes,
+ __ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
}
@@ -1075,38 +1222,38 @@ class LiftoffCompiler {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
- template <ValueType::Kind src_type, ValueType::Kind result_type, class EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
void EmitUnOp(EmitFn fn) {
- constexpr RegClass src_rc = reg_class_for(src_type);
- constexpr RegClass result_rc = reg_class_for(result_type);
+ constexpr RegClass src_rc = reg_class_for(src_kind);
+ constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
- template <ValueType::Kind type>
+ template <ValueKind kind>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueType sig_reps[] = {ValueType::Primitive(type)};
- FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref);
+ ValueKind sig_reps[] = {kind};
+ ValueKindSig sig(0, 1, sig_reps);
+ GenerateCCall(&dst, &sig, kind, &src, ext_ref);
};
- EmitUnOp<type, type>(emit_with_c_fallback);
+ EmitUnOp<kind, kind>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
- template <ValueType::Kind dst_type, ValueType::Kind src_type,
+ template <ValueKind dst_type, ValueKind src_kind,
TypeConversionTrapping can_trap>
void EmitTypeConversion(WasmOpcode opcode, ExternalReference (*fallback_fn)(),
WasmCodePosition trap_position) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc
@@ -1122,22 +1269,20 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)};
- FunctionSig sig(1, 1, sig_reps);
+ ValueKind sig_reps[] = {kI32, src_kind};
+ ValueKindSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src,
- ext_ref);
- __ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
+ GenerateCCall(dst_regs, &sig, dst_type, &src, ext_ref);
+ __ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else {
- ValueType sig_reps[] = {ValueType::Primitive(src_type)};
- FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src,
- ext_ref);
+ ValueKind sig_reps[] = {src_kind};
+ ValueKindSig sig(0, 1, sig_reps);
+ GenerateCCall(&dst, &sig, dst_type, &src, ext_ref);
}
}
- __ PushRegister(ValueType::Primitive(dst_type), dst);
+ __ PushRegister(dst_type, dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
@@ -1148,16 +1293,16 @@ class LiftoffCompiler {
#define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \
return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
-#define CASE_FLOAT_UNOP(opcode, type, fn) \
+#define CASE_FLOAT_UNOP(opcode, kind, fn) \
case kExpr##opcode: \
- return EmitUnOp<k##type, k##type>(&LiftoffAssembler::emit_##fn);
-#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, type, fn) \
+ return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
+#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
case kExpr##opcode: \
- return EmitFloatUnOpWithCFallback<k##type>(&LiftoffAssembler::emit_##fn, \
+ return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
-#define CASE_TYPE_CONVERSION(opcode, dst_type, src_type, ext_ref, can_trap) \
+#define CASE_TYPE_CONVERSION(opcode, dst_type, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \
- return EmitTypeConversion<k##dst_type, k##src_type, can_trap>( \
+ return EmitTypeConversion<k##dst_type, k##src_kind, can_trap>( \
kExpr##opcode, ext_ref, can_trap ? decoder->position() : 0);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
@@ -1246,9 +1391,9 @@ class LiftoffCompiler {
return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
- ValueType sig_i_i_reps[] = {kWasmI32, kWasmI32};
- FunctionSig sig_i_i(1, 1, sig_i_i_reps);
- GenerateCCall(&dst, &sig_i_i, kWasmStmt, &src,
+ ValueKind sig_i_i_reps[] = {kI32, kI32};
+ ValueKindSig sig_i_i(1, 1, sig_i_i_reps);
+ GenerateCCall(&dst, &sig_i_i, kStmt, &src,
ExternalReference::wasm_word32_popcnt());
});
case kExprI64Popcnt:
@@ -1256,10 +1401,10 @@ class LiftoffCompiler {
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The c function returns i32. We will zero-extend later.
- ValueType sig_i_l_reps[] = {kWasmI32, kWasmI64};
- FunctionSig sig_i_l(1, 1, sig_i_l_reps);
+ ValueKind sig_i_l_reps[] = {kI32, kI64};
+ ValueKindSig sig_i_l(1, 1, sig_i_l_reps);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
- GenerateCCall(&c_call_dst, &sig_i_l, kWasmStmt, &src,
+ GenerateCCall(&c_call_dst, &sig_i_l, kStmt, &src,
ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
@@ -1278,7 +1423,7 @@ class LiftoffCompiler {
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
return;
}
default:
@@ -1291,11 +1436,11 @@ class LiftoffCompiler {
#undef CASE_TYPE_CONVERSION
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn, typename EmitFnImm>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
+ typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
@@ -1312,18 +1457,18 @@ class LiftoffCompiler {
: __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
} else {
// The RHS was not an immediate.
- EmitBinOp<src_type, result_type>(fn);
+ EmitBinOp<src_kind, result_kind>(fn);
}
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
+ template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
@@ -1333,7 +1478,7 @@ class LiftoffCompiler {
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1347,16 +1492,15 @@ class LiftoffCompiler {
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
- ValueType sig_types[] = {kWasmI32, kWasmI64, kWasmI64};
+ ValueKind sig_kinds[] = {kI32, kI64, kI64};
// <i64, i64> -> i32 (with i64 output argument)
- FunctionSig sig(1, 2, sig_types);
- GenerateCCall(result_regs, &sig, kWasmI64, arg_regs, ext_ref);
+ ValueKindSig sig(1, 2, sig_kinds);
+ GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0}));
- __ emit_cond_jump(kEqual, trap_by_zero, kWasmI32, ret.gp(), tmp.gp());
+ __ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) {
__ LoadConstant(tmp, WasmValue(int32_t{-1}));
- __ emit_cond_jump(kEqual, trap_unrepresentable, kWasmI32, ret.gp(),
- tmp.gp());
+ __ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
}
}
@@ -1383,17 +1527,17 @@ class LiftoffCompiler {
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \
&LiftoffAssembler::emit_##fn##i);
-#define CASE_CCALL_BINOP(opcode, type, ext_ref_fn) \
+#define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
case kExpr##opcode: \
- return EmitBinOp<k##type, k##type>( \
+ return EmitBinOp<k##kind, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
- ValueType sig_reps[] = {kWasm##type, kWasm##type, kWasm##type}; \
- const bool out_via_stack = kWasm##type == kWasmI64; \
- FunctionSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
- ValueType out_arg_type = out_via_stack ? kWasmI64 : kWasmStmt; \
- GenerateCCall(&dst, &sig, out_arg_type, args, ext_ref); \
+ ValueKind sig_reps[] = {k##kind, k##kind, k##kind}; \
+ const bool out_via_stack = k##kind == kI64; \
+ ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_reps); \
+ ValueKind out_arg_kind = out_via_stack ? kI64 : kStmt; \
+ GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
});
switch (opcode) {
case kExprI32Add:
@@ -1650,7 +1794,7 @@ class LiftoffCompiler {
}
});
case kExprRefEq: {
- return EmitBinOp<ValueType::kOptRef, kI32>(
+ return EmitBinOp<kOptRef, kI32>(
BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
}
@@ -1662,7 +1806,7 @@ class LiftoffCompiler {
}
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
- __ PushConstant(kWasmI32, value);
+ __ PushConstant(kI32, value);
}
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
@@ -1672,24 +1816,24 @@ class LiftoffCompiler {
// a register immediately.
int32_t value_i32 = static_cast<int32_t>(value);
if (value_i32 == value) {
- __ PushConstant(kWasmI64, value_i32);
+ __ PushConstant(kI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmI64, reg);
+ __ PushRegister(kI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmF32, reg);
+ __ PushRegister(kF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
- __ PushRegister(kWasmF64, reg);
+ __ PushRegister(kF64, reg);
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
@@ -1699,18 +1843,29 @@ class LiftoffCompiler {
}
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {});
- __ PushRegister(type, null);
+ __ PushRegister(type.kind(), null);
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, kRefTypes, "func");
+ WasmCode::RuntimeStubId target = WasmCode::kWasmRefFunc;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmRefFuncDescriptor>(compilation_zone_);
+ ValueKind sig_reps[] = {kRef, kI32};
+ ValueKindSig sig(1, 1, sig_reps);
+ LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
+ __ LoadConstant(func_index_reg, WasmValue(function_index));
+ LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor, {func_index_var});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ __ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
- __ PushRegister(ValueType::Ref(arg.type.heap_type(), kNonNullable), obj);
+ __ PushRegister(kRef, obj);
}
void Drop(FullDecoder* decoder) { __ DropValues(1); }
@@ -1728,11 +1883,11 @@ class LiftoffCompiler {
// are not handled yet.
size_t num_returns = decoder->sig_->return_count();
if (num_returns == 1) {
- ValueType return_type = decoder->sig_->GetReturn(0);
+ ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
__ Store(info.gp(), no_reg, 0, return_reg,
- StoreType::ForValueType(return_type), pinned);
+ StoreType::ForValueKind(return_kind), pinned);
}
// Put the parameter in its place.
WasmTraceExitDescriptor descriptor;
@@ -1740,7 +1895,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(
@@ -1751,7 +1906,7 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(int64_t));
}
- void ReturnImpl(FullDecoder* decoder) {
+ void DoReturn(FullDecoder* decoder) {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
@@ -1761,15 +1916,11 @@ class LiftoffCompiler {
static_cast<uint32_t>(descriptor_->StackParameterCount()));
}
- void DoReturn(FullDecoder* decoder, Vector<Value> /*values*/) {
- ReturnImpl(decoder);
- }
-
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
- local_slot.type(), __ NextSpillOffset(local_slot.type()));
+ local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
auto* slot = &__ cache_state()->stack_state.back();
if (local_slot.is_reg()) {
__ cache_state()->inc_used(local_slot.reg());
@@ -1778,11 +1929,11 @@ class LiftoffCompiler {
slot->MakeConstant(local_slot.i32_const());
} else {
DCHECK(local_slot.is_stack());
- auto rc = reg_class_for(local_slot.type());
+ auto rc = reg_class_for(local_slot.kind());
LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ cache_state()->inc_used(reg);
slot->MakeRegister(reg);
- __ Fill(reg, local_slot.offset(), local_slot.type());
+ __ Fill(reg, local_slot.offset(), local_slot.kind());
}
}
@@ -1790,21 +1941,21 @@ class LiftoffCompiler {
uint32_t local_index) {
auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back();
- ValueType type = dst_slot->type();
+ ValueKind kind = dst_slot->kind();
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
- __ Fill(dst_slot->reg(), src_slot.offset(), type);
+ __ Fill(dst_slot->reg(), src_slot.offset(), kind);
return;
}
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
- DCHECK_EQ(type, __ local_type(local_index));
- RegClass rc = reg_class_for(type);
+ DCHECK_EQ(kind, __ local_type(local_index));
+ RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
- __ Fill(dst_reg, src_slot.offset(), type);
- *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
+ __ Fill(dst_reg, src_slot.offset(), kind);
+ *dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
@@ -1853,69 +2004,117 @@ class LiftoffCompiler {
LiftoffRegList* pinned, uint32_t* offset) {
Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
- LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize,
+ *pinned);
__ Load(LiftoffRegister(addr), addr, no_reg,
global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
- LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
*offset = global->offset;
}
return addr;
}
+ void GetBaseAndOffsetForImportedMutableExternRefGlobal(
+ const WasmGlobal* global, LiftoffRegList* pinned, Register* base,
+ Register* offset) {
+ Register globals_buffer =
+ pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer,
+ ImportedMutableGlobalsBuffers, *pinned);
+ *base = globals_buffer;
+ __ LoadTaggedPointer(
+ *base, globals_buffer, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset),
+ *pinned);
+
+ // For the offset we need the index of the global in the buffer, and
+ // then calculate the actual offset from the index. Load the index from
+ // the ImportedMutableGlobals array of the instance.
+ Register imported_mutable_globals =
+ pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
+
+ LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals,
+ kSystemPointerSize, *pinned);
+ *offset = imported_mutable_globals;
+ __ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
+ global->index * sizeof(Address),
+ kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load,
+ *pinned);
+ __ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
+ __ emit_i32_addi(*offset, *offset,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
+ }
+
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, global->type, "global")) {
+ ValueKind kind = global->type.kind();
+ if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
- if (global->type.is_reference_type()) {
+ if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
- unsupported(decoder, kRefTypes, "imported mutable globals");
+ LiftoffRegList pinned;
+ Register base = no_reg;
+ Register offset = no_reg;
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
+ &base, &offset);
+ __ LoadTaggedPointer(base, base, offset, 0, pinned);
+ __ PushRegister(kind, LiftoffRegister(base));
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
+ pinned);
Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadTaggedPointer(value, globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
pinned);
- __ PushRegister(global->type, LiftoffRegister(value));
+ __ PushRegister(kind, LiftoffRegister(value));
return;
}
LiftoffRegList pinned;
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
- pinned.set(__ GetUnusedRegister(reg_class_for(global->type), pinned));
- LoadType type = LoadType::ForValueType(global->type);
+ pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
+ LoadType type = LoadType::ForValueKind(kind);
__ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
- __ PushRegister(global->type, value);
+ __ PushRegister(kind, value);
}
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
- if (!CheckSupportedType(decoder, global->type, "global")) {
+ ValueKind kind = global->type.kind();
+ if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
- if (global->type.is_reference_type()) {
+ if (is_reference_type(kind)) {
if (global->mutability && global->imported) {
- unsupported(decoder, kRefTypes, "imported mutable globals");
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ Register base = no_reg;
+ Register offset = no_reg;
+ GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
+ &base, &offset);
+ __ StoreTaggedPointer(base, offset, 0, value, pinned);
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
+ pinned);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
__ StoreTaggedPointer(globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
@@ -1927,7 +2126,7 @@ class LiftoffCompiler {
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
- StoreType type = StoreType::ForValueType(global->type);
+ StoreType type = StoreType::ForValueKind(kind);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
@@ -1947,9 +2146,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableGetDescriptor>(compilation_zone_);
- ValueType result_type = env_->module->tables[imm.index].type;
- ValueType sig_reps[] = {result_type, kWasmI32, kWasmI32};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind result_kind = env_->module->tables[imm.index].type.kind();
+ ValueKind sig_reps[] = {result_kind, kI32, kI32};
+ ValueKindSig sig(1, 2, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index});
__ CallRuntimeStub(target);
@@ -1960,7 +2159,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(result_type, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
@@ -1980,9 +2179,9 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableSetDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32,
- env_->module->tables[imm.index].type};
- FunctionSig sig(0, 3, sig_reps);
+ ValueKind table_kind = env_->module->tables[imm.index].type.kind();
+ ValueKind sig_reps[] = {kI32, kI32, table_kind};
+ ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor, {table_index, index, value});
__ CallRuntimeStub(target);
@@ -2001,29 +2200,33 @@ class LiftoffCompiler {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
+ void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
+ unsupported(decoder, kOtherReason, "testing opcode");
+ }
+
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
- ValueType type = __ cache_state()->stack_state.end()[-1].type();
- DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
+ ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
+ DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
{true_value, false_value}, {});
- if (!__ emit_select(dst, condition, true_value, false_value, type)) {
+ if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
// Emit generic code (using branches) instead.
Label cont;
Label case_false;
- __ emit_cond_jump(kEqual, &case_false, kWasmI32, condition);
- if (dst != true_value) __ Move(dst, true_value, type);
+ __ emit_cond_jump(kEqual, &case_false, kI32, condition);
+ if (dst != true_value) __ Move(dst, true_value, kind);
__ emit_jump(&cont);
__ bind(&case_false);
- if (dst != false_value) __ Move(dst, false_value, type);
+ if (dst != false_value) __ Move(dst, false_value, kind);
__ bind(&cont);
}
- __ PushRegister(type, dst);
+ __ PushRegister(kind, dst);
}
void BrImpl(Control* target) {
@@ -2038,7 +2241,7 @@ class LiftoffCompiler {
void BrOrRet(FullDecoder* decoder, uint32_t depth) {
if (depth == decoder->control_depth() - 1) {
- ReturnImpl(decoder);
+ DoReturn(decoder);
} else {
BrImpl(decoder->control_at(depth));
}
@@ -2058,17 +2261,17 @@ class LiftoffCompiler {
if (!has_outstanding_op()) {
// Unary "equal" means "equals zero".
- __ emit_cond_jump(kEqual, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(kEqual, &cont_false, kI32, value);
} else if (outstanding_op_ == kExprI32Eqz) {
// Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, &cont_false, kWasmI32, value);
+ __ emit_cond_jump(kUnequal, &cont_false, kI32, value);
outstanding_op_ = kNoOutstandingOp;
} else {
// Otherwise, it's an i32 compare opcode.
LiftoffCondition cond = Negate(GetCompareCondition(outstanding_op_));
Register rhs = value;
Register lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)).gp();
- __ emit_cond_jump(cond, &cont_false, kWasmI32, lhs, rhs);
+ __ emit_cond_jump(cond, &cont_false, kI32, lhs, rhs);
outstanding_op_ = kNoOutstandingOp;
}
@@ -2106,7 +2309,7 @@ class LiftoffCompiler {
uint32_t split = min + (max - min) / 2;
Label upper_half;
__ LoadConstant(tmp, WasmValue(split));
- __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kWasmI32, value.gp(),
+ __ emit_cond_jump(kUnsignedGreaterEqual, &upper_half, kI32, value.gp(),
tmp.gp());
// Emit br table for lower half:
GenerateBrTable(decoder, tmp, value, min, split, table_iterator,
@@ -2130,8 +2333,8 @@ class LiftoffCompiler {
LiftoffRegister tmp = __ GetUnusedRegister(kGpReg, pinned);
__ LoadConstant(tmp, WasmValue(uint32_t{imm.table_count}));
Label case_default;
- __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kWasmI32,
- value.gp(), tmp.gp());
+ __ emit_cond_jump(kUnsignedGreaterEqual, &case_default, kI32, value.gp(),
+ tmp.gp());
GenerateBrTable(decoder, tmp, value, 0, imm.table_count, &table_iterator,
&br_targets);
@@ -2169,7 +2372,7 @@ class LiftoffCompiler {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
spilled->entries.push_back(SpilledRegistersForInspection::Entry{
- slot.offset(), slot.reg(), slot.type()});
+ slot.offset(), slot.reg(), slot.kind()});
__ RecordUsedSpillOffset(slot.offset());
}
return spilled;
@@ -2194,8 +2397,7 @@ class LiftoffCompiler {
stub, position,
V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersForInspection()
: nullptr,
- safepoint_info, pc,
- RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
+ safepoint_info, pc, RegisterOOLDebugSideTableEntry()));
return out_of_line_code_.back().label.get();
}
@@ -2250,7 +2452,7 @@ class LiftoffCompiler {
} else if (kSystemPointerSize == kInt32Size) {
DCHECK_GE(kMaxUInt32, env_->max_memory_size);
// Unary "unequal" means "not equals zero".
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, index.high_gp());
+ __ emit_cond_jump(kUnequal, trap_label, kI32, index.high_gp());
}
uintptr_t end_offset = offset + access_size - 1u;
@@ -2259,7 +2461,7 @@ class LiftoffCompiler {
LiftoffRegister end_offset_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister mem_size = __ GetUnusedRegister(kGpReg, pinned);
- LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size.gp(), MemorySize, kSystemPointerSize, pinned);
__ LoadConstant(end_offset_reg, WasmValue::ForUintPtr(end_offset));
@@ -2298,12 +2500,12 @@ class LiftoffCompiler {
// {emit_cond_jump} to use the "test" instruction without the "and" here.
// Then we can also avoid using the temp register here.
__ emit_i32_andi(address, index, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
+ __ emit_cond_jump(kUnequal, trap_label, kI32, address);
} else {
// For alignment checks we only look at the lower 32-bits in {offset}.
__ emit_i32_addi(address, index, static_cast<uint32_t>(offset));
__ emit_i32_andi(address, address, align_mask);
- __ emit_cond_jump(kUnequal, trap_label, kWasmI32, address);
+ __ emit_cond_jump(kUnequal, trap_label, kI32, address);
}
}
@@ -2353,7 +2555,7 @@ class LiftoffCompiler {
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
- __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kIntPtr);
}
source_position_table_builder_.AddPosition(__ pc_offset(),
@@ -2380,7 +2582,7 @@ class LiftoffCompiler {
}
}
Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
if (*offset) __ emit_ptrsize_addi(index, index, *offset);
__ emit_ptrsize_and(index, index, tmp);
*offset = 0;
@@ -2396,8 +2598,8 @@ class LiftoffCompiler {
void LoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
- ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, value_type, "load")) return;
+ ValueKind kind = type.value_type().kind();
+ if (!CheckSupportedType(decoder, kind, "load")) return;
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDontForceCheck);
@@ -2408,8 +2610,8 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- RegClass rc = reg_class_for(value_type);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
uint32_t protected_load_pc = 0;
__ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
@@ -2418,7 +2620,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
- __ PushRegister(value_type, value);
+ __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -2432,7 +2634,7 @@ class LiftoffCompiler {
const Value& index_val, Value* result) {
// LoadTransform requires SIMD support, so check for it here. If
// unsupported, bailout and let TurboFan lower the code.
- if (!CheckSupportedType(decoder, kWasmS128, "LoadTransform")) {
+ if (!CheckSupportedType(decoder, kS128, "LoadTransform")) {
return;
}
@@ -2451,7 +2653,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load with transformation");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
__ LoadTransform(value, addr, index, offset, type, transform,
@@ -2462,7 +2664,7 @@ class LiftoffCompiler {
WasmCode::kThrowWasmTrapMemOutOfBounds,
protected_load_pc);
}
- __ PushRegister(ValueType::Primitive(kS128), value);
+ __ PushRegister(kS128, value);
if (FLAG_trace_wasm_memory) {
// Again load extend is different.
@@ -2477,7 +2679,7 @@ class LiftoffCompiler {
void LoadLane(FullDecoder* decoder, LoadType type, const Value& _value,
const Value& _index, const MemoryAccessImmediate<validate>& imm,
const uint8_t laneidx, Value* _result) {
- if (!CheckSupportedType(decoder, kWasmS128, "LoadLane")) {
+ if (!CheckSupportedType(decoder, kS128, "LoadLane")) {
return;
}
@@ -2493,7 +2695,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("load lane");
Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
uint32_t protected_load_pc = 0;
@@ -2505,7 +2707,7 @@ class LiftoffCompiler {
protected_load_pc);
}
- __ PushRegister(ValueType::Primitive(kS128), result);
+ __ PushRegister(kS128, result);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -2516,8 +2718,8 @@ class LiftoffCompiler {
void StoreMem(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, const Value& value_val) {
- ValueType value_type = type.value_type();
- if (!CheckSupportedType(decoder, value_type, "store")) return;
+ ValueKind kind = type.value_type().kind();
+ if (!CheckSupportedType(decoder, kind, "store")) return;
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
LiftoffRegister full_index = __ PopToRegister(pinned);
@@ -2530,7 +2732,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
uint32_t protected_store_pc = 0;
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
@@ -2548,16 +2750,48 @@ class LiftoffCompiler {
}
void StoreLane(FullDecoder* decoder, StoreType type,
- const MemoryAccessImmediate<validate>& imm, const Value& index,
- const Value& value, const uint8_t laneidx) {
- unsupported(decoder, kSimd, "simd load lane");
+ const MemoryAccessImmediate<validate>& imm,
+ const Value& _index, const Value& _value, const uint8_t lane) {
+ if (!CheckSupportedType(decoder, kS128, "StoreLane")) return;
+ LiftoffRegList pinned;
+ LiftoffRegister value = pinned.set(__ PopToRegister());
+ LiftoffRegister full_index = __ PopToRegister(pinned);
+ Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
+ full_index, pinned, kDontForceCheck);
+ if (index == no_reg) return;
+
+ uintptr_t offset = imm.offset;
+ pinned.set(index);
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("store lane to memory");
+ Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ uint32_t protected_store_pc = 0;
+ __ StoreLane(addr, index, offset, value, type, lane, &protected_store_pc);
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_store_pc);
+ }
+ if (FLAG_trace_wasm_memory) {
+ TraceMemoryOperation(true, type.mem_rep(), index, offset,
+ decoder->position());
+ }
}
- void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
+ void CurrentMemoryPages(FullDecoder* /* decoder */, Value* /* result */) {
Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
- LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize, {});
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
- __ PushRegister(kWasmI32, LiftoffRegister(mem_size));
+ LiftoffRegister result{mem_size};
+ if (env_->module->is_memory64 && kNeedI64RegPair) {
+ LiftoffRegister high_word =
+ __ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(mem_size));
+ // The high word is always 0 on 32-bit systems.
+ __ LoadConstant(high_word, WasmValue{uint32_t{0}});
+ result = LiftoffRegister::ForPair(mem_size, high_word.gp());
+ }
+ __ PushRegister(env_->module->is_memory64 ? kI64 : kI32, result);
}
void MemoryGrow(FullDecoder* decoder, const Value& value, Value* result_val) {
@@ -2575,29 +2809,35 @@ class LiftoffCompiler {
WasmMemoryGrowDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
- DCHECK_EQ(kWasmI32.machine_type(), descriptor.GetParameterType(0));
+ DCHECK_EQ(machine_type(kI32), descriptor.GetParameterType(0));
Register param_reg = descriptor.GetRegisterParameter(0);
- if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kWasmI32);
+ if (input.gp() != param_reg) __ Move(param_reg, input.gp(), kI32);
__ CallRuntimeStub(WasmCode::kWasmMemoryGrow);
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
if (kReturnRegister0 != result.gp()) {
- __ Move(result.gp(), kReturnRegister0, kWasmI32);
+ __ Move(result.gp(), kReturnRegister0, kI32);
}
- __ PushRegister(kWasmI32, result);
+ __ PushRegister(kI32, result);
}
- DebugSideTableBuilder::EntryBuilder* RegisterDebugSideTableEntry(
+ void RegisterDebugSideTableEntry(
DebugSideTableBuilder::AssumeSpilling assume_spilling) {
+ if (V8_LIKELY(!debug_sidetable_builder_)) return;
+ debug_sidetable_builder_->NewEntry(__ pc_offset(),
+ VectorOf(__ cache_state()->stack_state),
+ assume_spilling);
+ }
+
+ DebugSideTableBuilder::EntryBuilder* RegisterOOLDebugSideTableEntry() {
if (V8_LIKELY(!debug_sidetable_builder_)) return nullptr;
- int stack_height = static_cast<int>(__ cache_state()->stack_height());
- return debug_sidetable_builder_->NewEntry(
- __ pc_offset(), __ num_locals(), stack_height,
- __ cache_state()->stack_state.begin(), assume_spilling);
+ return debug_sidetable_builder_->NewOOLEntry(
+ VectorOf(__ cache_state()->stack_state),
+ DebugSideTableBuilder::kAssumeSpilling);
}
enum CallKind : bool { kReturnCall = true, kNoReturnCall = false };
@@ -2617,7 +2857,7 @@ class LiftoffCompiler {
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- unsupported(decoder, kRefTypes, "call_ref");
+ CallRef(decoder, func_ref.type, sig, kNoReturnCall);
}
void ReturnCall(FullDecoder* decoder,
@@ -2635,7 +2875,7 @@ class LiftoffCompiler {
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- unsupported(decoder, kRefTypes, "call_ref");
+ CallRef(decoder, func_ref.type, sig, kReturnCall);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -2649,21 +2889,20 @@ class LiftoffCompiler {
Label cont_false;
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
- Register null = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
LoadNullValue(null, pinned);
- __ emit_cond_jump(kUnequal, &cont_false, ref_object.type, ref.gp(), null);
+ __ emit_cond_jump(kUnequal, &cont_false, ref_object.type.kind(), ref.gp(),
+ null);
BrOrRet(decoder, depth);
__ bind(&cont_false);
- __ PushRegister(ValueType::Ref(ref_object.type.heap_type(), kNonNullable),
- ref);
+ __ PushRegister(kRef, ref);
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src3 = __ PopToRegister();
LiftoffRegister src2 = __ PopToRegister(LiftoffRegList::ForRegs(src3));
LiftoffRegister src1 =
@@ -2676,12 +2915,12 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src1, src2))
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
template <typename EmitFn, typename EmitFnImm>
void EmitSimdShiftOp(EmitFn fn, EmitFnImm fnImm) {
- static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
@@ -2693,30 +2932,30 @@ class LiftoffCompiler {
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
}
void EmitSimdFloatRoundingOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(LiftoffRegister, LiftoffRegister),
ExternalReference (*ext_ref)()) {
- static constexpr RegClass rc = reg_class_for(kWasmS128);
+ static constexpr RegClass rc = reg_class_for(kS128);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = __ GetUnusedRegister(rc, {src}, {});
if (!(asm_.*emit_fn)(dst, src)) {
// Return v128 via stack for ARM.
- ValueType sig_v_s_reps[] = {kWasmS128};
- FunctionSig sig_v_s(0, 1, sig_v_s_reps);
- GenerateCCall(&dst, &sig_v_s, kWasmS128, &src, ext_ref());
+ ValueKind sig_v_s_reps[] = {kS128};
+ ValueKindSig sig_v_s(0, 1, sig_v_s_reps);
+ GenerateCCall(&dst, &sig_v_s, kS128, &src, ext_ref());
}
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
@@ -2727,6 +2966,8 @@ class LiftoffCompiler {
switch (opcode) {
case wasm::kExprI8x16Swizzle:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_swizzle);
+ case wasm::kExprI8x16Popcnt:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_popcnt);
case wasm::kExprI8x16Splat:
return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
case wasm::kExprI16x8Splat:
@@ -2811,6 +3052,18 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_s);
case wasm::kExprI32x4GeU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_ge_u);
+ case wasm::kExprI64x2Eq:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_eq);
+ case wasm::kExprI64x2LtS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i64x2_gt_s);
+ case wasm::kExprI64x2GtS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_gt_s);
+ case wasm::kExprI64x2LeS:
+ return EmitBinOp<kS128, kS128, true>(
+ &LiftoffAssembler::emit_i64x2_ge_s);
+ case wasm::kExprI64x2GeS:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_ge_s);
case wasm::kExprF32x4Eq:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_eq);
case wasm::kExprF32x4Ne:
@@ -2847,8 +3100,8 @@ class LiftoffCompiler {
return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
case wasm::kExprI8x16Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
- case wasm::kExprV8x16AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_anytrue);
+ case wasm::kExprV128AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v128_anytrue);
case wasm::kExprV8x16AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_alltrue);
case wasm::kExprI8x16BitMask:
@@ -2886,8 +3139,6 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
case wasm::kExprI16x8Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
- case wasm::kExprV16x8AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_anytrue);
case wasm::kExprV16x8AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_alltrue);
case wasm::kExprI16x8BitMask:
@@ -2923,6 +3174,12 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_s);
case wasm::kExprI16x8MaxU:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16S:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s);
+ case wasm::kExprI16x8ExtAddPairwiseI8x16U:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u);
case wasm::kExprI16x8ExtMulLowI8x16S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s);
@@ -2935,10 +3192,11 @@ class LiftoffCompiler {
case wasm::kExprI16x8ExtMulHighI8x16U:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u);
+ case wasm::kExprI16x8Q15MulRSatS:
+ return EmitBinOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i16x8_q15mulr_sat_s);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
- case wasm::kExprV32x4AnyTrue:
- return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_anytrue);
case wasm::kExprV32x4AllTrue:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_alltrue);
case wasm::kExprI32x4BitMask:
@@ -2969,6 +3227,12 @@ class LiftoffCompiler {
case wasm::kExprI32x4DotI16x8S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i32x4_dot_i16x8_s);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8S:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s);
+ case wasm::kExprI32x4ExtAddPairwiseI16x8U:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u);
case wasm::kExprI32x4ExtMulLowI16x8S:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s);
@@ -2983,6 +3247,8 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u);
case wasm::kExprI64x2Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_neg);
+ case wasm::kExprV64x2AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v64x2_alltrue);
case wasm::kExprI64x2Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
&LiftoffAssembler::emit_i64x2_shli);
@@ -3012,6 +3278,18 @@ class LiftoffCompiler {
&LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u);
case wasm::kExprI64x2BitMask:
return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i64x2_bitmask);
+ case wasm::kExprI64x2SConvertI32x4Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_sconvert_i32x4_low);
+ case wasm::kExprI64x2SConvertI32x4High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_sconvert_i32x4_high);
+ case wasm::kExprI64x2UConvertI32x4Low:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_uconvert_i32x4_low);
+ case wasm::kExprI64x2UConvertI32x4High:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i64x2_uconvert_i32x4_high);
case wasm::kExprF32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_f32x4_abs);
case wasm::kExprF32x4Neg:
@@ -3150,26 +3428,27 @@ class LiftoffCompiler {
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_abs);
case wasm::kExprI32x4Abs:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_abs);
+ case wasm::kExprI64x2Abs:
+ return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_abs);
default:
unsupported(decoder, kSimd, "simd");
}
}
- template <ValueType::Kind src_type, ValueType::Kind result_type,
- typename EmitFn>
+ template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitSimdExtractLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
- static constexpr RegClass src_rc = reg_class_for(src_type);
- static constexpr RegClass result_rc = reg_class_for(result_type);
+ static constexpr RegClass src_rc = reg_class_for(src_kind);
+ static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, {})
: __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
- __ PushRegister(ValueType::Primitive(result_type), dst);
+ __ PushRegister(result_kind, dst);
}
- template <ValueType::Kind src2_type, typename EmitFn>
+ template <ValueKind src2_type, typename EmitFn>
void EmitSimdReplaceLaneOp(EmitFn fn,
const SimdLaneImmediate<validate>& imm) {
static constexpr RegClass src1_rc = reg_class_for(kS128);
@@ -3192,7 +3471,7 @@ class LiftoffCompiler {
LiftoffRegList::ForRegs(src2))
: __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void SimdLaneOp(FullDecoder* decoder, WasmOpcode opcode,
@@ -3202,9 +3481,9 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
-#define CASE_SIMD_EXTRACT_LANE_OP(opcode, type, fn) \
+#define CASE_SIMD_EXTRACT_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
- EmitSimdExtractLaneOp<kS128, k##type>( \
+ EmitSimdExtractLaneOp<kS128, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { \
__ emit_##fn(dst, lhs, imm_lane_idx); \
}, \
@@ -3219,9 +3498,9 @@ class LiftoffCompiler {
CASE_SIMD_EXTRACT_LANE_OP(F32x4ExtractLane, F32, f32x4_extract_lane)
CASE_SIMD_EXTRACT_LANE_OP(F64x2ExtractLane, F64, f64x2_extract_lane)
#undef CASE_SIMD_EXTRACT_LANE_OP
-#define CASE_SIMD_REPLACE_LANE_OP(opcode, type, fn) \
+#define CASE_SIMD_REPLACE_LANE_OP(opcode, kind, fn) \
case wasm::kExpr##opcode: \
- EmitSimdReplaceLaneOp<k##type>( \
+ EmitSimdReplaceLaneOp<k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, \
uint8_t imm_lane_idx) { \
__ emit_##fn(dst, src1, src2, imm_lane_idx); \
@@ -3245,7 +3524,7 @@ class LiftoffCompiler {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
- constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {});
bool all_zeroes = std::all_of(std::begin(imm.value), std::end(imm.value),
[](uint8_t v) { return v == 0; });
@@ -3259,7 +3538,7 @@ class LiftoffCompiler {
} else {
__ LiftoffAssembler::emit_s128_const(dst, imm.value);
}
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
}
void Simd8x16ShuffleOp(FullDecoder* decoder,
@@ -3269,7 +3548,7 @@ class LiftoffCompiler {
if (!CpuFeatures::SupportsWasmSimd128()) {
return unsupported(decoder, kSimd, "simd");
}
- static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass result_rc = reg_class_for(kS128);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
@@ -3284,13 +3563,127 @@ class LiftoffCompiler {
std::swap(lhs, rhs);
}
__ LiftoffAssembler::emit_i8x16_shuffle(dst, lhs, rhs, shuffle, is_swizzle);
- __ PushRegister(kWasmS128, dst);
+ __ PushRegister(kS128, dst);
+ }
+
+ void ToSmi(Register reg) {
+ if (COMPRESS_POINTERS_BOOL || kSystemPointerSize == 4) {
+ __ emit_i32_shli(reg, reg, kSmiShiftSize + kSmiTagSize);
+ } else {
+ __ emit_i64_shli(LiftoffRegister{reg}, LiftoffRegister{reg},
+ kSmiShiftSize + kSmiTagSize);
+ }
}
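ToSmi above is a bare tag-and-shift: with pointer compression or on 32-bit targets V8 uses 31-bit Smis, otherwise 32-bit Smis held in the upper half of a 64-bit word. A minimal standalone sketch of the two encodings, assuming V8's usual constants (kSmiTagSize == 1, kSmiShiftSize == 0 or 31); illustrative only, not part of the patch:

#include <cstdint>

// Shift amounts mirror ToSmi's kSmiShiftSize + kSmiTagSize.
inline intptr_t EncodeSmi31(int32_t value) {  // 31-bit Smis: shift by 0 + 1
  return static_cast<intptr_t>(static_cast<uintptr_t>(static_cast<intptr_t>(value)) << 1);
}
inline int64_t EncodeSmi32(int32_t value) {   // 32-bit Smis: shift by 31 + 1
  return static_cast<int64_t>(static_cast<uint64_t>(int64_t{value}) << 32);
}
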
- void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
- const Vector<Value>& args) {
- unsupported(decoder, kExceptionHandling, "throw");
+ void Store32BitExceptionValue(Register values_array, int* index_in_array,
+ Register value, LiftoffRegList pinned) {
+ LiftoffRegister tmp_reg = __ GetUnusedRegister(kGpReg, pinned);
+ // Get the lower half word into tmp_reg and extend to a Smi.
+ --*index_in_array;
+ __ emit_i32_andi(tmp_reg.gp(), value, 0xffff);
+ ToSmi(tmp_reg.gp());
+ __ StoreTaggedPointer(
+ values_array, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
+ tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
+
+ // Get the upper half word into tmp_reg and extend to a Smi.
+ --*index_in_array;
+ __ emit_i32_shri(tmp_reg.gp(), value, 16);
+ ToSmi(tmp_reg.gp());
+ __ StoreTaggedPointer(
+ values_array, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(*index_in_array),
+ tmp_reg, pinned, LiftoffAssembler::kSkipWriteBarrier);
+ }
+
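Store32BitExceptionValue above splits a 32-bit payload into two 16-bit halves so each half always fits into a Smi, even when only 31-bit Smis are available. A standalone sketch of the split (illustrative name, not part of the patch); the handler stores the low half at the higher array index first, then the high half one slot below:

#include <cstdint>
#include <utility>

std::pair<uint32_t, uint32_t> SplitExceptionValue(uint32_t value) {
  return {value & 0xffffu /* low half */, value >> 16 /* high half */};
}
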
+ void StoreExceptionValue(ValueType type, Register values_array,
+ int* index_in_array, LiftoffRegList pinned) {
+ // TODO(clemensb): Handle more types.
+ DCHECK_EQ(kWasmI32, type);
+ LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
+ Store32BitExceptionValue(values_array, index_in_array, value.gp(), pinned);
}
+
+ void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>& imm,
+ const Vector<Value>& /* args */) {
+ LiftoffRegList pinned;
+
+ // Load the encoded size in a register for the builtin call.
+ int encoded_size = WasmExceptionPackage::GetEncodedSize(imm.exception);
+ LiftoffRegister encoded_size_reg =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ LoadConstant(encoded_size_reg, WasmValue(encoded_size));
+
+ // Call the WasmAllocateFixedArray builtin to create the values array.
+ DEBUG_CODE_COMMENT("call WasmAllocateFixedArray builtin");
+ compiler::CallDescriptor* create_values_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocateFixedArrayDescriptor>(
+ compilation_zone_);
+
+ ValueKind create_values_sig_reps[] = {kPointerValueType,
+ LiftoffAssembler::kIntPtr};
+ ValueKindSig create_values_sig(1, 1, create_values_sig_reps);
+
+ __ PrepareBuiltinCall(
+ &create_values_sig, create_values_descriptor,
+ {LiftoffAssembler::VarState{kSmiValueType,
+ LiftoffRegister{encoded_size_reg}, 0}});
+ __ CallRuntimeStub(WasmCode::kWasmAllocateFixedArray);
+ DefineSafepoint();
+
+ // The FixedArray for the exception values is now in the first gp return
+ // register.
+ DCHECK_EQ(kReturnRegister0.code(),
+ create_values_descriptor->GetReturnLocation(0).AsRegister());
+ LiftoffRegister values_array{kReturnRegister0};
+ pinned.set(values_array);
+
+ // Now store the exception values in the FixedArray. Do this from last to
+ // first value, such that we can just pop them from the value stack.
+ DEBUG_CODE_COMMENT("fill values array");
+ int index = encoded_size;
+ auto* sig = imm.exception->sig;
+ for (size_t param_idx = sig->parameter_count(); param_idx > 0;
+ --param_idx) {
+ ValueType type = sig->GetParam(param_idx - 1);
+ if (type != kWasmI32) {
+ unsupported(decoder, kExceptionHandling,
+ "unsupported type in exception payload");
+ return;
+ }
+ StoreExceptionValue(type, values_array.gp(), &index, pinned);
+ }
+ DCHECK_EQ(0, index);
+
+ // Load the exception tag.
+ DEBUG_CODE_COMMENT("load exception tag");
+ Register exception_tag =
+ pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(exception_tag, ExceptionsTable, pinned);
+ __ LoadTaggedPointer(
+ exception_tag, exception_tag, no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
+
+ // Finally, call WasmThrow.
+ DEBUG_CODE_COMMENT("call WasmThrow builtin");
+ compiler::CallDescriptor* throw_descriptor =
+ GetBuiltinCallDescriptor<WasmThrowDescriptor>(compilation_zone_);
+
+ ValueKind throw_sig_reps[] = {kPointerValueType, kPointerValueType};
+ ValueKindSig throw_sig(0, 2, throw_sig_reps);
+
+ __ PrepareBuiltinCall(
+ &throw_sig, throw_descriptor,
+ {LiftoffAssembler::VarState{kPointerValueType,
+ LiftoffRegister{exception_tag}, 0},
+ LiftoffAssembler::VarState{kPointerValueType, values_array, 0}});
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+ __ CallRuntimeStub(WasmCode::kWasmThrow);
+ DefineSafepoint();
+ }
+
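Taken together, the Throw handler above lowers a wasm throw into three steps: allocate a FixedArray for the encoded payload, fill it from the value stack (last parameter first, two Smi slots per i32), and call the WasmThrow builtin with the exception tag and the array. A stand-in sketch of that control flow (hypothetical helper names, not part of the patch):

#include <cstdint>
#include <vector>

// Stand-ins for the builtins the handler calls.
std::vector<uint32_t> WasmAllocateFixedArray(int size) { return std::vector<uint32_t>(size); }
void WasmThrowBuiltin(int /*tag*/, const std::vector<uint32_t>& /*values*/) {}

void LowerThrow(int tag, int encoded_size, std::vector<uint32_t>* value_stack) {
  std::vector<uint32_t> values = WasmAllocateFixedArray(encoded_size);
  int index = encoded_size;
  while (index > 0) {
    uint32_t value = value_stack->back();  // pop parameters from last to first
    value_stack->pop_back();
    values[--index] = value & 0xffffu;     // low half (Smi-encoded in the real code)
    values[--index] = value >> 16;         // high half
  }
  WasmThrowBuiltin(tag, values);
}
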
void Rethrow(FullDecoder* decoder, const Value& exception) {
unsupported(decoder, kExceptionHandling, "rethrow");
}
@@ -3309,7 +3702,7 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegList outer_pinned;
if (FLAG_trace_wasm_memory) outer_pinned.set(index);
__ AtomicStore(addr, index, offset, value, type, outer_pinned);
@@ -3321,7 +3714,7 @@ class LiftoffCompiler {
void AtomicLoadMem(FullDecoder* decoder, LoadType type,
const MemoryAccessImmediate<validate>& imm) {
- ValueType value_type = type.value_type();
+ ValueKind kind = type.value_type().kind();
LiftoffRegister full_index = __ PopToRegister();
Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
full_index, {}, kDoForceCheck);
@@ -3333,11 +3726,11 @@ class LiftoffCompiler {
index = AddMemoryMasking(index, &offset, &pinned);
DEBUG_CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
- RegClass rc = reg_class_for(value_type);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
+ RegClass rc = reg_class_for(kind);
LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
__ AtomicLoad(value, addr, index, offset, type, pinned);
- __ PushRegister(value_type, value);
+ __ PushRegister(kind, value);
if (FLAG_trace_wasm_memory) {
TraceMemoryOperation(false, type.mem_type().representation(), index,
@@ -3351,7 +3744,7 @@ class LiftoffCompiler {
uintptr_t, LiftoffRegister,
LiftoffRegister,
StoreType)) {
- ValueType result_type = type.value_type();
+ ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister());
#ifdef V8_TARGET_ARCH_IA32
@@ -3362,7 +3755,7 @@ class LiftoffCompiler {
LiftoffRegister result = value;
if (__ cache_state()->is_used(value)) {
result = pinned.set(__ GetUnusedRegister(value.reg_class(), pinned));
- __ Move(result, value, result_type);
+ __ Move(result, value, result_kind);
pinned.clear(value);
value = result;
}
@@ -3381,10 +3774,10 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
(asm_.*emit_fn)(addr, index, offset, value, result, type);
- __ PushRegister(result_type, result);
+ __ PushRegister(result_kind, result);
}
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
@@ -3405,7 +3798,7 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
pinned.clear(LiftoffRegister(index));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
@@ -3420,10 +3813,10 @@ class LiftoffCompiler {
// assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
type);
- __ PushRegister(type.value_type(), result);
+ __ PushRegister(type.value_type().kind(), result);
return;
#else
- ValueType result_type = type.value_type();
+ ValueKind result_kind = type.value_type().kind();
LiftoffRegList pinned;
LiftoffRegister new_value = pinned.set(__ PopToRegister());
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
@@ -3437,13 +3830,13 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
LiftoffRegister result =
- pinned.set(__ GetUnusedRegister(reg_class_for(result_type), pinned));
+ pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
__ AtomicCompareExchange(addr, index, offset, expected, new_value, result,
type);
- __ PushRegister(result_type, result);
+ __ PushRegister(result_kind, result);
#endif
}
@@ -3459,15 +3852,15 @@ class LiftoffCompiler {
StubCallMode::kCallWasmRuntimeStub); // stub call mode
}
- void AtomicWait(FullDecoder* decoder, ValueType type,
+ void AtomicWait(FullDecoder* decoder, ValueKind kind,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(2, {});
Register index_reg =
- BoundsCheckMem(decoder, type.element_size_bytes(), imm.offset,
+ BoundsCheckMem(decoder, element_size_bytes(kind), imm.offset,
full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
- AlignmentCheckMem(decoder, type.element_size_bytes(), imm.offset, index_reg,
+ AlignmentCheckMem(decoder, element_size_bytes(kind), imm.offset, index_reg,
pinned);
uintptr_t offset = imm.offset;
@@ -3494,7 +3887,7 @@ class LiftoffCompiler {
WasmCode::RuntimeStubId target;
compiler::CallDescriptor* call_descriptor;
- if (type == kWasmI32) {
+ if (kind == kI32) {
if (kNeedI64RegPair) {
target = WasmCode::kWasmI32AtomicWait32;
call_descriptor =
@@ -3520,8 +3913,8 @@ class LiftoffCompiler {
}
}
- ValueType sig_reps[] = {kPointerValueType, type, kWasmI64};
- FunctionSig sig(0, 3, sig_reps);
+ ValueKind sig_reps[] = {kPointerValueType, kind, kI64};
+ ValueKindSig sig(0, 3, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{index, expected_value, timeout});
@@ -3532,19 +3925,17 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
void AtomicNotify(FullDecoder* decoder,
const MemoryAccessImmediate<validate>& imm) {
LiftoffRegister full_index = __ PeekToRegister(1, {});
- Register index_reg =
- BoundsCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
- full_index, {}, kDoForceCheck);
+ Register index_reg = BoundsCheckMem(decoder, kInt32Size, imm.offset,
+ full_index, {}, kDoForceCheck);
if (index_reg == no_reg) return;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index_reg);
- AlignmentCheckMem(decoder, kWasmI32.element_size_bytes(), imm.offset,
- index_reg, pinned);
+ AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
@@ -3558,8 +3949,8 @@ class LiftoffCompiler {
__ emit_ptrsize_addi(index_plus_offset, index_plus_offset, offset);
}
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32};
+ ValueKindSig sig(1, 2, sig_reps);
auto call_descriptor =
GetBuiltinCallDescriptor<WasmAtomicNotifyDescriptor>(compilation_zone_);
@@ -3575,7 +3966,7 @@ class LiftoffCompiler {
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ PushRegister(kWasmI32, LiftoffRegister(kReturnRegister0));
+ __ PushRegister(kI32, LiftoffRegister(kReturnRegister0));
}
#define ATOMIC_STORE_LIST(V) \
@@ -3685,10 +4076,10 @@ class LiftoffCompiler {
#undef ATOMIC_COMPARE_EXCHANGE_OP
case kExprI32AtomicWait:
- AtomicWait(decoder, kWasmI32, imm);
+ AtomicWait(decoder, kI32, imm);
break;
case kExprI64AtomicWait:
- AtomicWait(decoder, kWasmI64, imm);
+ AtomicWait(decoder, kI64, imm);
break;
case kExprAtomicNotify:
AtomicNotify(decoder, imm);
@@ -3721,18 +4112,17 @@ class LiftoffCompiler {
__ LoadConstant(segment_index, WasmValue(imm.data_segment_index));
ExternalReference ext_ref = ExternalReference::wasm_memory_init();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32,
- kWasmI32, kWasmI32, kWasmI32};
- FunctionSig sig(1, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32, kI32};
+ ValueKindSig sig(1, 5, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src,
segment_index, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void DataDrop(FullDecoder* decoder, const DataDropImmediate<validate>& imm) {
@@ -3740,13 +4130,13 @@ class LiftoffCompiler {
Register seg_size_array =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
- LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(seg_size_array, DataSegmentSizes, kSystemPointerSize,
+ pinned);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
// Scale the seg_index for the array access.
- __ LoadConstant(seg_index,
- WasmValue(imm.index << kWasmI32.element_size_log2()));
+ __ LoadConstant(seg_index, WasmValue(imm.index << element_size_log2(kI32)));
// Set the length of the segment to '0' to drop it.
LiftoffRegister null_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -3765,17 +4155,16 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_copy();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
- kWasmI32};
- FunctionSig sig(1, 4, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+ ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, src, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
}
void MemoryFill(FullDecoder* decoder,
@@ -3788,17 +4177,23 @@ class LiftoffCompiler {
Register instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ FillInstanceInto(instance);
ExternalReference ext_ref = ExternalReference::wasm_memory_fill();
- ValueType sig_reps[] = {kWasmI32, kPointerValueType, kWasmI32, kWasmI32,
- kWasmI32};
- FunctionSig sig(1, 4, sig_reps);
+ ValueKind sig_reps[] = {kI32, kPointerValueType, kI32, kI32, kI32};
+ ValueKindSig sig(1, 4, sig_reps);
LiftoffRegister args[] = {LiftoffRegister(instance), dst, value, size};
// We don't need the instance anymore after the call. We can use the
// register for the result.
LiftoffRegister result(instance);
- GenerateCCall(&result, &sig, kWasmStmt, args, ext_ref);
+ GenerateCCall(&result, &sig, kStmt, args, ext_ref);
Label* trap_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapMemOutOfBounds);
- __ emit_cond_jump(kEqual, trap_label, kWasmI32, result.gp());
+ __ emit_cond_jump(kEqual, trap_label, kI32, result.gp());
+ }
+
+ void LoadSmi(LiftoffRegister reg, int value) {
+ Address smi_value = Smi::FromInt(value).ptr();
+ using smi_type =
+ std::conditional_t<kSmiValueType == kI32, int32_t, int64_t>;
+ __ LoadConstant(reg, WasmValue{static_cast<smi_type>(smi_value)});
}
void TableInit(FullDecoder* decoder, const TableInitImmediate<validate>& imm,
@@ -3807,24 +4202,13 @@ class LiftoffCompiler {
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
-#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
- WasmValue table_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table.index).ptr()));
- WasmValue segment_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
-#else
- WasmValue table_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table.index).ptr()));
- WasmValue segment_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.elem_segment_index).ptr()));
-#endif
- __ LoadConstant(table_index_reg, table_index_val);
+ LoadSmi(table_index_reg, imm.table.index);
LiftoffAssembler::VarState table_index(kPointerValueType, table_index_reg,
0);
LiftoffRegister segment_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(segment_index_reg, segment_index_val);
+ LoadSmi(segment_index_reg, imm.elem_segment_index);
LiftoffAssembler::VarState segment_index(kPointerValueType,
segment_index_reg, 0);
@@ -3836,9 +4220,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableInitDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
- table_index_val.type(), segment_index_val.type()};
- FunctionSig sig(0, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+ ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_index, segment_index});
@@ -3856,7 +4239,7 @@ class LiftoffCompiler {
Register dropped_elem_segments =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(dropped_elem_segments, DroppedElemSegments,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
LiftoffRegister seg_index =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
@@ -3874,27 +4257,15 @@ class LiftoffCompiler {
Vector<Value> args) {
LiftoffRegList pinned;
-#if V8_TARGET_ARCH_32_BIT || defined(V8_COMPRESS_POINTERS)
- WasmValue table_dst_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table_dst.index).ptr()));
- WasmValue table_src_index_val(
- static_cast<uint32_t>(Smi::FromInt(imm.table_src.index).ptr()));
-#else
- WasmValue table_dst_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table_dst.index).ptr()));
- WasmValue table_src_index_val(
- static_cast<uint64_t>(Smi::FromInt(imm.table_src.index).ptr()));
-#endif
-
LiftoffRegister table_dst_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(table_dst_index_reg, table_dst_index_val);
+ LoadSmi(table_dst_index_reg, imm.table_dst.index);
LiftoffAssembler::VarState table_dst_index(kPointerValueType,
table_dst_index_reg, 0);
LiftoffRegister table_src_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- __ LoadConstant(table_src_index_reg, table_src_index_val);
+ LoadSmi(table_src_index_reg, imm.table_src.index);
LiftoffAssembler::VarState table_src_index(kPointerValueType,
table_src_index_reg, 0);
@@ -3906,10 +4277,8 @@ class LiftoffCompiler {
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmTableCopyDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {kWasmI32, kWasmI32, kWasmI32,
- table_dst_index_val.type(),
- table_src_index_val.type()};
- FunctionSig sig(0, 5, sig_reps);
+ ValueKind sig_reps[] = {kI32, kI32, kI32, kSmiValueType, kSmiValueType};
+ ValueKindSig sig(0, 5, sig_reps);
__ PrepareBuiltinCall(&sig, call_descriptor,
{dst, src, size, table_dst_index, table_src_index});
@@ -3940,13 +4309,12 @@ class LiftoffCompiler {
void StructNew(FullDecoder* decoder,
const StructIndexImmediate<validate>& imm, const Value& rtt,
bool initial_values_on_stack) {
- ValueType struct_value_type = ValueType::Ref(imm.index, kNonNullable);
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateStructWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateStructWithRttDescriptor>(
compilation_zone_);
- ValueType sig_reps[] = {struct_value_type, rtt.type};
- FunctionSig sig(1, 1, sig_reps);
+ ValueKind sig_reps[] = {kRef, rtt.type.kind()};
+ ValueKindSig sig(1, 1, sig_reps);
LiftoffAssembler::VarState rtt_value =
__ cache_state()->stack_state.end()[-1];
__ PrepareBuiltinCall(&sig, call_descriptor, {rtt_value});
@@ -3960,19 +4328,19 @@ class LiftoffCompiler {
for (uint32_t i = imm.struct_type->field_count(); i > 0;) {
i--;
int offset = StructFieldOffset(imm.struct_type, i);
- ValueType field_type = imm.struct_type->field(i);
+ ValueKind field_kind = imm.struct_type->field(i).kind();
LiftoffRegister value = initial_values_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
- reg_class_for(field_type), pinned));
+ reg_class_for(field_kind), pinned));
if (!initial_values_on_stack) {
- if (!CheckSupportedType(decoder, field_type, "default value")) return;
- SetDefaultValue(value, field_type, pinned);
+ if (!CheckSupportedType(decoder, field_kind, "default value")) return;
+ SetDefaultValue(value, field_kind, pinned);
}
- StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
- __ PushRegister(struct_value_type, obj);
+ __ PushRegister(kRef, obj);
}
void StructNewWithRtt(FullDecoder* decoder,
@@ -3991,34 +4359,34 @@ class LiftoffCompiler {
const FieldIndexImmediate<validate>& field, bool is_signed,
Value* result) {
const StructType* struct_type = field.struct_index.struct_type;
- ValueType field_type = struct_type->field(field.index);
- if (!CheckSupportedType(decoder, field_type, "field load")) return;
+ ValueKind field_kind = struct_type->field(field.index).kind();
+ if (!CheckSupportedType(decoder, field_kind, "field load")) return;
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
LiftoffRegister value =
- pinned.set(__ GetUnusedRegister(reg_class_for(field_type), pinned));
- LoadObjectField(value, obj.gp(), no_reg, offset, field_type, is_signed,
+ __ GetUnusedRegister(reg_class_for(field_kind), pinned);
+ LoadObjectField(value, obj.gp(), no_reg, offset, field_kind, is_signed,
pinned);
- __ PushRegister(field_type.Unpacked(), value);
+ __ PushRegister(unpacked(field_kind), value);
}
void StructSet(FullDecoder* decoder, const Value& struct_obj,
const FieldIndexImmediate<validate>& field,
const Value& field_value) {
const StructType* struct_type = field.struct_index.struct_type;
- ValueType field_type = struct_type->field(field.index);
+ ValueKind field_kind = struct_type->field(field.index).kind();
int offset = StructFieldOffset(struct_type, field.index);
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, struct_obj.type);
- StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_type);
+ StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
}
void ArrayNew(FullDecoder* decoder, const ArrayIndexImmediate<validate>& imm,
- ValueType rtt_type, bool initial_value_on_stack) {
+ ValueKind rtt_type, bool initial_value_on_stack) {
// Max length check.
{
LiftoffRegister length =
@@ -4028,24 +4396,23 @@ class LiftoffCompiler {
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
static_cast<int>(wasm::kV8MaxWasmArrayLength));
}
- ValueType array_value_type = ValueType::Ref(imm.index, kNonNullable);
- ValueType elem_type = imm.array_type->element_type();
- int elem_size = elem_type.element_size_bytes();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ int elem_size = element_size_bytes(elem_kind);
// Allocate the array.
{
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateArrayWithRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateArrayWithRttDescriptor>(
compilation_zone_);
- ValueType sig_reps[] = {array_value_type, rtt_type, kWasmI32, kWasmI32};
- FunctionSig sig(1, 3, sig_reps);
+ ValueKind sig_reps[] = {kRef, rtt_type, kI32, kI32};
+ ValueKindSig sig(1, 3, sig_reps);
LiftoffAssembler::VarState rtt_var =
__ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState length_var =
__ cache_state()->stack_state.end()[-2];
LiftoffRegister elem_size_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(elem_size_reg, WasmValue(elem_size));
- LiftoffAssembler::VarState elem_size_var(kWasmI32, elem_size_reg, 0);
+ LiftoffAssembler::VarState elem_size_var(kI32, elem_size_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor,
{rtt_var, length_var, elem_size_var});
__ CallRuntimeStub(target);
@@ -4060,10 +4427,10 @@ class LiftoffCompiler {
LiftoffRegister value = initial_value_on_stack
? pinned.set(__ PopToRegister(pinned))
: pinned.set(__ GetUnusedRegister(
- reg_class_for(elem_type), pinned));
+ reg_class_for(elem_kind), pinned));
if (!initial_value_on_stack) {
- if (!CheckSupportedType(decoder, elem_type, "default value")) return;
- SetDefaultValue(value, elem_type, pinned);
+ if (!CheckSupportedType(decoder, elem_kind, "default value")) return;
+ SetDefaultValue(value, elem_kind, pinned);
}
// Initialize the array's elements.
@@ -4072,34 +4439,34 @@ class LiftoffCompiler {
offset,
WasmValue(wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize)));
LiftoffRegister end_offset = length;
- if (elem_type.element_size_log2() != 0) {
+ if (element_size_log2(elem_kind) != 0) {
__ emit_i32_shli(end_offset.gp(), length.gp(),
- elem_type.element_size_log2());
+ element_size_log2(elem_kind));
}
__ emit_i32_add(end_offset.gp(), end_offset.gp(), offset.gp());
Label loop, done;
__ bind(&loop);
- __ emit_cond_jump(kUnsignedGreaterEqual, &done, kWasmI32, offset.gp(),
+ __ emit_cond_jump(kUnsignedGreaterEqual, &done, kI32, offset.gp(),
end_offset.gp());
- StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_type);
+ StoreObjectField(obj.gp(), offset.gp(), 0, value, pinned, elem_kind);
__ emit_i32_addi(offset.gp(), offset.gp(), elem_size);
__ emit_jump(&loop);
__ bind(&done);
- __ PushRegister(array_value_type, obj);
+ __ PushRegister(kRef, obj);
}
void ArrayNewWithRtt(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length_value, const Value& initial_value,
const Value& rtt, Value* result) {
- ArrayNew(decoder, imm, rtt.type, true);
+ ArrayNew(decoder, imm, rtt.type.kind(), true);
}
void ArrayNewDefault(FullDecoder* decoder,
const ArrayIndexImmediate<validate>& imm,
const Value& length, const Value& rtt, Value* result) {
- ArrayNew(decoder, imm, rtt.type, false);
+ ArrayNew(decoder, imm, rtt.type.kind(), false);
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
@@ -4110,17 +4477,17 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
- ValueType elem_type = imm.array_type->element_type();
- if (!CheckSupportedType(decoder, elem_type, "array load")) return;
- int elem_size_shift = elem_type.element_size_log2();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ if (!CheckSupportedType(decoder, elem_kind, "array load")) return;
+ int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
LiftoffRegister value = __ GetUnusedRegister(kGpReg, {array}, pinned);
LoadObjectField(value, array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
- elem_type, is_signed, pinned);
- __ PushRegister(elem_type.Unpacked(), value);
+ elem_kind, is_signed, pinned);
+ __ PushRegister(unpacked(elem_kind), value);
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -4132,25 +4499,24 @@ class LiftoffCompiler {
LiftoffRegister array = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, array.gp(), pinned, array_obj.type);
BoundsCheck(decoder, array, index, pinned);
- ValueType elem_type = imm.array_type->element_type();
- int elem_size_shift = elem_type.element_size_log2();
+ ValueKind elem_kind = imm.array_type->element_type().kind();
+ int elem_size_shift = element_size_log2(elem_kind);
if (elem_size_shift != 0) {
__ emit_i32_shli(index.gp(), index.gp(), elem_size_shift);
}
StoreObjectField(array.gp(), index.gp(),
wasm::ObjectAccess::ToTagged(WasmArray::kHeaderSize),
- value, pinned, elem_type);
+ value, pinned, elem_kind);
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, array_obj.type);
- LiftoffRegister len = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister len = __ GetUnusedRegister(kGpReg, pinned);
int kLengthOffset = wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
- LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kWasmI32, false,
- pinned);
- __ PushRegister(kWasmI32, len);
+ LoadObjectField(len, obj.gp(), no_reg, kLengthOffset, kI32, false, pinned);
+ __ PushRegister(kI32, len);
}
// 1 bit Smi tag, 31 bits Smi shift, 1 bit i31ref high-bit truncation.
@@ -4166,7 +4532,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shli(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI31Ref, dst);
+ __ PushRegister(kRef, dst);
}
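The shift constant used by I31New and by I31GetS/I31GetU below is 33 when Smis are 32 bits wide, matching the comment above: 1 Smi tag bit + 31 Smi shift bits + 1 bit of i31ref high-bit truncation. A standalone sketch of that encoding (assumed layout, not part of the patch):

#include <cstdint>

const int kI31To32BitSmiShift = 33;

inline int64_t EncodeI31(int32_t v) {        // i31.new
  return static_cast<int64_t>(static_cast<uint64_t>(int64_t{v}) << kI31To32BitSmiShift);
}
inline int32_t DecodeI31S(int64_t smi) {     // i31.get_s: arithmetic right shift
  return static_cast<int32_t>(smi >> kI31To32BitSmiShift);
}
inline uint32_t DecodeI31U(int64_t smi) {    // i31.get_u: logical right shift
  return static_cast<uint32_t>(static_cast<uint64_t>(smi) >> kI31To32BitSmiShift);
}
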
void I31GetS(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4178,7 +4544,7 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_sari(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
}
void I31GetU(FullDecoder* decoder, const Value& input, Value* result) {
@@ -4190,63 +4556,32 @@ class LiftoffCompiler {
DCHECK(SmiValuesAre32Bits());
__ emit_i64_shri(dst, src, kI31To32BitSmiShift);
}
- __ PushRegister(kWasmI32, dst);
+ __ PushRegister(kI32, dst);
}
- void RttCanon(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- Value* result) {
+ void RttCanon(FullDecoder* decoder, uint32_t type_index, Value* result) {
LiftoffRegister rtt = __ GetUnusedRegister(kGpReg, {});
- RootIndex index;
- switch (imm.type.representation()) {
- case wasm::HeapType::kEq:
- index = RootIndex::kWasmRttEqrefMap;
- break;
- case wasm::HeapType::kExtern:
- index = RootIndex::kWasmRttExternrefMap;
- break;
- case wasm::HeapType::kFunc:
- index = RootIndex::kWasmRttFuncrefMap;
- break;
- case wasm::HeapType::kI31:
- index = RootIndex::kWasmRttI31refMap;
- break;
- case wasm::HeapType::kAny:
- index = RootIndex::kWasmRttAnyrefMap;
- break;
- case wasm::HeapType::kBottom:
- UNREACHABLE();
- default:
- // User-defined type.
- LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps);
- __ LoadTaggedPointer(
- rtt.gp(), rtt.gp(), no_reg,
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
- imm.type.ref_index()),
- {});
- __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
- return;
- }
- LOAD_INSTANCE_FIELD(rtt.gp(), IsolateRoot, kSystemPointerSize);
- __ LoadTaggedPointer(rtt.gp(), rtt.gp(), no_reg,
- IsolateData::root_slot_offset(index), {});
- __ PushRegister(ValueType::Rtt(imm.type, 1), rtt);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(rtt.gp(), ManagedObjectMaps, {});
+ __ LoadTaggedPointer(
+ rtt.gp(), rtt.gp(), no_reg,
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(type_index), {});
+ __ PushRegister(kRttWithDepth, rtt);
}
- void RttSub(FullDecoder* decoder, const HeapTypeImmediate<validate>& imm,
- const Value& parent, Value* result) {
- ValueType parent_value_type = parent.type;
- ValueType rtt_value_type =
- ValueType::Rtt(imm.type, parent_value_type.depth() + 1);
+ void RttSub(FullDecoder* decoder, uint32_t type_index, const Value& parent,
+ Value* result) {
+ ValueKind parent_value_kind = parent.type.kind();
+ ValueKind rtt_value_type = kRttWithDepth;
WasmCode::RuntimeStubId target = WasmCode::kWasmAllocateRtt;
compiler::CallDescriptor* call_descriptor =
GetBuiltinCallDescriptor<WasmAllocateRttDescriptor>(compilation_zone_);
- ValueType sig_reps[] = {rtt_value_type, kWasmI32, parent_value_type};
- FunctionSig sig(1, 2, sig_reps);
+ ValueKind sig_reps[] = {rtt_value_type, kI32, parent_value_kind};
+ ValueKindSig sig(1, 2, sig_reps);
LiftoffAssembler::VarState parent_var =
__ cache_state()->stack_state.end()[-1];
LiftoffRegister type_reg = __ GetUnusedRegister(kGpReg, {});
- __ LoadConstant(type_reg, WasmValue(imm.type.representation()));
- LiftoffAssembler::VarState type_var(kWasmI32, type_reg, 0);
+ __ LoadConstant(type_reg, WasmValue(type_index));
+ LiftoffAssembler::VarState type_var(kI32, type_reg, 0);
__ PrepareBuiltinCall(&sig, call_descriptor, {type_var, parent_var});
__ CallRuntimeStub(target);
DefineSafepoint();
@@ -4255,67 +4590,69 @@ class LiftoffCompiler {
__ PushRegister(rtt_value_type, LiftoffRegister(kReturnRegister0));
}
+ enum NullSucceeds : bool { // --
+ kNullSucceeds = true,
+ kNullFails = false
+ };
+
// Falls through on match (=successful type check).
// Returns the register containing the object.
LiftoffRegister SubtypeCheck(FullDecoder* decoder, const Value& obj,
const Value& rtt, Label* no_match,
+ NullSucceeds null_succeeds,
LiftoffRegList pinned = {},
Register opt_scratch = no_reg) {
Label match;
LiftoffRegister rtt_reg = pinned.set(__ PopToRegister(pinned));
LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
- bool obj_can_be_i31 = IsSubtypeOf(kWasmI31Ref, obj.type, decoder->module_);
- bool rtt_is_i31 = rtt.type.heap_representation() == HeapType::kI31;
- bool i31_check_only = obj_can_be_i31 && rtt_is_i31;
- if (i31_check_only) {
- __ emit_smi_check(obj_reg.gp(), no_match,
- LiftoffAssembler::kJumpOnNotSmi);
- // Emit no further code, just fall through to {match}.
- } else {
- // Reserve all temporary registers up front, so that the cache state
- // tracking doesn't get confused by the following conditional jumps.
- LiftoffRegister tmp1 =
- opt_scratch != no_reg
- ? LiftoffRegister(opt_scratch)
- : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- if (obj_can_be_i31) {
- DCHECK(!rtt_is_i31);
- __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
- }
- if (obj.type.is_nullable()) {
- LoadNullValue(tmp1.gp(), pinned);
- __ emit_cond_jump(kEqual, no_match, obj.type, obj_reg.gp(), tmp1.gp());
- }
-
- // At this point, the object is neither null nor an i31ref. Perform
- // a regular type check. Check for exact match first.
- __ LoadMap(tmp1.gp(), obj_reg.gp());
- // {tmp1} now holds the object's map.
- __ emit_cond_jump(kEqual, &match, rtt.type, tmp1.gp(), rtt_reg.gp());
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, null_succeeds ? &match : no_match,
+ obj.type.kind(), obj_reg.gp(), tmp1.gp());
+ }
- // If the object isn't guaranteed to be an array or struct, check that.
- // Subsequent code wouldn't handle e.g. funcrefs.
- if (!is_data_ref_type(obj.type, decoder->module_)) {
- EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
- }
+ // Perform a regular type check. Check for exact match first.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ // {tmp1} now holds the object's map.
+
+ if (decoder->module_->has_signature(rtt.type.ref_index())) {
+ // Function case: currently, the only way for a function to match an rtt
+ // is if its map is equal to that rtt.
+ __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
+ rtt_reg.gp());
+ __ bind(&match);
+ return obj_reg;
+ }
- // Constant-time subtyping check: load exactly one candidate RTT from the
- // supertypes list.
- // Step 1: load the WasmTypeInfo into {tmp1}.
- constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
- Map::kConstructorOrBackPointerOrNativeContextOffset);
- __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset,
- pinned);
- // Step 2: load the super types list into {tmp1}.
- constexpr int kSuperTypesOffset =
- wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
- __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
- pinned);
- // Step 3: check the list's length.
- LiftoffRegister list_length = tmp2;
- __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ // Array/struct case for the rest of the function.
+
+ // Check for rtt equality, and if not, check if the rtt is a struct/array
+ // rtt.
+ __ emit_cond_jump(kEqual, &match, rtt.type.kind(), tmp1.gp(), rtt_reg.gp());
+
+ // Constant-time subtyping check: load exactly one candidate RTT from the
+ // supertypes list.
+ // Step 1: load the WasmTypeInfo into {tmp1}.
+ constexpr int kTypeInfoOffset = wasm::ObjectAccess::ToTagged(
+ Map::kConstructorOrBackPointerOrNativeContextOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kTypeInfoOffset, pinned);
+ // Step 2: load the super types list into {tmp1}.
+ constexpr int kSuperTypesOffset =
+ wasm::ObjectAccess::ToTagged(WasmTypeInfo::kSupertypesOffset);
+ __ LoadTaggedPointer(tmp1.gp(), tmp1.gp(), no_reg, kSuperTypesOffset,
+ pinned);
+ // Step 3: check the list's length.
+ LiftoffRegister list_length = tmp2;
+ __ LoadFixedArrayLengthAsInt32(list_length, tmp1.gp(), pinned);
+ if (rtt.type.has_depth()) {
__ emit_i32_cond_jumpi(kUnsignedLessEqual, no_match, list_length.gp(),
rtt.type.depth());
// Step 4: load the candidate list slot into {tmp1}, and compare it.
@@ -4323,20 +4660,41 @@ class LiftoffCompiler {
tmp1.gp(), tmp1.gp(), no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(rtt.type.depth()),
pinned);
- __ emit_cond_jump(kUnequal, no_match, rtt.type, tmp1.gp(), rtt_reg.gp());
- // Fall through to {match}.
+ __ emit_cond_jump(kUnequal, no_match, rtt.type.kind(), tmp1.gp(),
+ rtt_reg.gp());
+ } else {
+ // Preserve {obj_reg} across the call.
+ LiftoffRegList saved_regs = LiftoffRegList::ForRegs(obj_reg);
+ __ PushRegisters(saved_regs);
+ WasmCode::RuntimeStubId target = WasmCode::kWasmSubtypeCheck;
+ compiler::CallDescriptor* call_descriptor =
+ GetBuiltinCallDescriptor<WasmSubtypeCheckDescriptor>(
+ compilation_zone_);
+ ValueKind sig_reps[] = {kI32, kOptRef, rtt.type.kind()};
+ ValueKindSig sig(1, 2, sig_reps);
+ LiftoffAssembler::VarState rtt_state(kPointerValueType, rtt_reg, 0);
+ LiftoffAssembler::VarState tmp1_state(kPointerValueType, tmp1, 0);
+ __ PrepareBuiltinCall(&sig, call_descriptor, {tmp1_state, rtt_state});
+ __ CallRuntimeStub(target);
+ DefineSafepoint();
+ __ PopRegisters(saved_regs);
+ __ Move(tmp1.gp(), kReturnRegister0, kI32);
+ __ emit_i32_cond_jumpi(kEqual, no_match, tmp1.gp(), 0);
}
+
+ // Fall through to {match}.
__ bind(&match);
return obj_reg;
}
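For rtts with a statically known depth, the check SubtypeCheck emits is constant-time: compare the object's map against the rtt, and otherwise load exactly one candidate from the map's supertypes list at that depth. A standalone sketch with illustrative stand-in types (not V8's real classes, not part of the patch):

#include <cstddef>
#include <vector>

struct Rtt { const void* map; size_t depth; };
struct TypeInfo { std::vector<const void*> supertypes; };

bool MatchesRtt(const void* object_map, const TypeInfo& info, const Rtt& rtt) {
  if (object_map == rtt.map) return true;                  // exact match
  if (info.supertypes.size() <= rtt.depth) return false;   // supertype list too short
  return info.supertypes[rtt.depth] == rtt.map;            // single lookup, no loop
}
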
void RefTest(FullDecoder* decoder, const Value& obj, const Value& rtt,
- Value* result_val) {
+ Value* /* result_val */) {
Label return_false, done;
LiftoffRegList pinned;
LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
- SubtypeCheck(decoder, obj, rtt, &return_false, pinned, result.gp());
+ SubtypeCheck(decoder, obj, rtt, &return_false, kNullFails, pinned,
+ result.gp());
__ LoadConstant(result, WasmValue(1));
// TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
@@ -4345,16 +4703,16 @@ class LiftoffCompiler {
__ bind(&return_false);
__ LoadConstant(result, WasmValue(0));
__ bind(&done);
- __ PushRegister(kWasmI32, result);
+ __ PushRegister(kI32, result);
}
void RefCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
Value* result) {
Label* trap_label = AddOutOfLineTrap(decoder->position(),
WasmCode::kThrowWasmTrapIllegalCast);
- LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, trap_label);
- __ PushRegister(ValueType::Ref(rtt.type.heap_type(), kNonNullable),
- obj_reg);
+ LiftoffRegister obj_reg =
+ SubtypeCheck(decoder, obj, rtt, trap_label, kNullSucceeds);
+ __ PushRegister(obj.type.kind(), obj_reg);
}
void BrOnCast(FullDecoder* decoder, const Value& obj, const Value& rtt,
@@ -4367,18 +4725,188 @@ class LiftoffCompiler {
}
Label cont_false;
- LiftoffRegister obj_reg = SubtypeCheck(decoder, obj, rtt, &cont_false);
+ LiftoffRegister obj_reg =
+ SubtypeCheck(decoder, obj, rtt, &cont_false, kNullFails);
- __ PushRegister(rtt.type.is_bottom()
- ? kWasmBottom
- : ValueType::Ref(rtt.type.heap_type(), kNonNullable),
- obj_reg);
+ __ PushRegister(rtt.type.is_bottom() ? kBottom : obj.type.kind(), obj_reg);
BrOrRet(decoder, depth);
__ bind(&cont_false);
// Drop the branch's value, restore original value.
Drop(decoder);
- __ PushRegister(obj.type, obj_reg);
+ __ PushRegister(obj.type.kind(), obj_reg);
+ }
+
+ // Abstract type checkers. They all return the object register and fall
+ // through to match.
+ LiftoffRegister DataCheck(const Value& obj, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister tmp2 = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
+ }
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+
+ // Load the object's map and check if it is a struct/array map.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ EmitDataRefCheck(tmp1.gp(), no_match, tmp2, pinned);
+
+ return obj_reg;
+ }
+
+ LiftoffRegister FuncCheck(const Value& obj, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ // Reserve all temporary registers up front, so that the cache state
+ // tracking doesn't get confused by the following conditional jumps.
+ LiftoffRegister tmp1 =
+ opt_scratch != no_reg
+ ? LiftoffRegister(opt_scratch)
+ : pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ if (obj.type.is_nullable()) {
+ LoadNullValue(tmp1.gp(), pinned);
+ __ emit_cond_jump(kEqual, no_match, kOptRef, obj_reg.gp(), tmp1.gp());
+ }
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnSmi);
+
+ // Load the object's map and check if its InstanceType field is that of a
+ // function.
+ __ LoadMap(tmp1.gp(), obj_reg.gp());
+ __ Load(tmp1, tmp1.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U, pinned);
+ __ emit_i32_cond_jumpi(kUnequal, no_match, tmp1.gp(), JS_FUNCTION_TYPE);
+
+ return obj_reg;
+ }
+
+ LiftoffRegister I31Check(const Value& object, Label* no_match,
+ LiftoffRegList pinned, Register opt_scratch) {
+ LiftoffRegister obj_reg = pinned.set(__ PopToRegister(pinned));
+
+ __ emit_smi_check(obj_reg.gp(), no_match, LiftoffAssembler::kJumpOnNotSmi);
+
+ return obj_reg;
+ }
+
+ using TypeChecker = LiftoffRegister (LiftoffCompiler::*)(
+ const Value& obj, Label* no_match, LiftoffRegList pinned,
+ Register opt_scratch);
+
+ template <TypeChecker type_checker>
+ void AbstractTypeCheck(const Value& object) {
+ Label match, no_match, done;
+ LiftoffRegList pinned;
+ LiftoffRegister result = pinned.set(__ GetUnusedRegister(kGpReg, {}));
+
+ (this->*type_checker)(object, &no_match, pinned, result.gp());
+
+ __ bind(&match);
+ __ LoadConstant(result, WasmValue(1));
+ // TODO(jkummerow): Emit near jumps on platforms where it's more efficient.
+ __ emit_jump(&done);
+
+ __ bind(&no_match);
+ __ LoadConstant(result, WasmValue(0));
+ __ bind(&done);
+ __ PushRegister(kI32, result);
+ }
+
+ void RefIsData(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */) {
+ return AbstractTypeCheck<&LiftoffCompiler::DataCheck>(object);
+ }
+
+ void RefIsFunc(FullDecoder* /* decoder */, const Value& object,
+ Value* /* result_val */) {
+ return AbstractTypeCheck<&LiftoffCompiler::FuncCheck>(object);
+ }
+
+ void RefIsI31(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCheck<&LiftoffCompiler::I31Check>(object);
+ }
+
+ template <TypeChecker type_checker>
+ void AbstractTypeCast(const Value& object, FullDecoder* decoder,
+ ValueKind result_kind) {
+ Label* trap_label = AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapIllegalCast);
+ Label match;
+ LiftoffRegister obj_reg =
+ (this->*type_checker)(object, trap_label, {}, no_reg);
+ __ bind(&match);
+ __ PushRegister(result_kind, obj_reg);
+ }
+
+ void RefAsData(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCast<&LiftoffCompiler::DataCheck>(object, decoder, kRef);
+ }
+
+ void RefAsFunc(FullDecoder* decoder, const Value& object,
+ Value* /* result */) {
+ return AbstractTypeCast<&LiftoffCompiler::FuncCheck>(object, decoder, kRef);
+ }
+
+ void RefAsI31(FullDecoder* decoder, const Value& object, Value* result) {
+ return AbstractTypeCast<&LiftoffCompiler::I31Check>(object, decoder, kRef);
+ }
+
+ template <TypeChecker type_checker>
+ void BrOnAbstractType(const Value& object, FullDecoder* decoder,
+ uint32_t br_depth, ValueKind result_kind) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (br_depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(br_depth)->br_merge()->arity);
+ }
+
+ Label match, no_match;
+ LiftoffRegister obj_reg =
+ (this->*type_checker)(object, &no_match, {}, no_reg);
+
+ __ bind(&match);
+ __ PushRegister(result_kind, obj_reg);
+ BrOrRet(decoder, br_depth);
+
+ __ bind(&no_match);
+ // Drop the branch's value, restore original value.
+ Drop(decoder);
+ __ PushRegister(object.type.kind(), obj_reg);
+ }
+
+ void BrOnData(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::DataCheck>(object, decoder,
+ br_depth, kRef);
+ }
+
+ void BrOnFunc(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::FuncCheck>(object, decoder,
+ br_depth, kRef);
+ }
+
+ void BrOnI31(FullDecoder* decoder, const Value& object,
+ Value* /* value_on_branch */, uint32_t br_depth) {
+ return BrOnAbstractType<&LiftoffCompiler::I31Check>(object, decoder,
+ br_depth, kRef);
}
void Forward(FullDecoder* decoder, const Value& from, Value* to) {
@@ -4386,10 +4914,20 @@ class LiftoffCompiler {
}
private:
+ ValueKindSig* MakeKindSig(Zone* zone, const FunctionSig* sig) {
+ ValueKind* reps =
+ zone->NewArray<ValueKind>(sig->parameter_count() + sig->return_count());
+ ValueKind* ptr = reps;
+ for (ValueType type : sig->all()) *ptr++ = type.kind();
+ return zone->New<ValueKindSig>(sig->return_count(), sig->parameter_count(),
+ reps);
+ }
+
void CallDirect(FullDecoder* decoder,
const CallFunctionImmediate<validate>& imm,
const Value args[], Value returns[], CallKind call_kind) {
- for (ValueType ret : imm.sig->returns()) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
+ for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -4406,20 +4944,20 @@ class LiftoffCompiler {
Register imported_targets = tmp;
LOAD_INSTANCE_FIELD(imported_targets, ImportedFunctionTargets,
- kSystemPointerSize);
+ kSystemPointerSize, pinned);
__ Load(LiftoffRegister(target), imported_targets, no_reg,
imm.index * sizeof(Address), kPointerLoadType, pinned);
Register imported_function_refs = tmp;
LOAD_TAGGED_PTR_INSTANCE_FIELD(imported_function_refs,
- ImportedFunctionRefs);
+ ImportedFunctionRefs, pinned);
Register imported_function_ref = tmp;
__ LoadTaggedPointer(
imported_function_ref, imported_function_refs, no_reg,
ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), pinned);
Register* explicit_instance = &imported_function_ref;
- __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
@@ -4429,11 +4967,11 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(imm.sig, call_descriptor, target);
+ __ CallIndirect(sig, call_descriptor, target);
}
} else {
// A direct call within this module just gets the current instance.
- __ PrepareCall(imm.sig, call_descriptor);
+ __ PrepareCall(sig, call_descriptor);
// Just encode the function index. This will be patched at instantiation.
Address addr = static_cast<Address>(imm.index);
if (call_kind == kReturnCall) {
@@ -4453,16 +4991,17 @@ class LiftoffCompiler {
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(imm.sig, call_descriptor);
+ __ FinishCall(sig, call_descriptor);
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
const CallIndirectImmediate<validate>& imm,
CallKind call_kind) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, imm.sig);
if (imm.table_index != 0) {
return unsupported(decoder, kRefTypes, "table index != 0");
}
- for (ValueType ret : imm.sig->returns()) {
+ for (ValueKind ret : sig->returns()) {
if (!CheckSupportedType(decoder, ret, "return")) return;
}
@@ -4486,9 +5025,10 @@ class LiftoffCompiler {
// Compare against table size stored in
// {instance->indirect_function_table_size}.
- LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size);
- __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kWasmI32,
- index, tmp_const);
+ LOAD_INSTANCE_FIELD(tmp_const, IndirectFunctionTableSize, kUInt32Size,
+ pinned);
+ __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
+ tmp_const);
// Mask the index to prevent SSCA.
if (FLAG_untrusted_code_mitigations) {
@@ -4514,7 +5054,8 @@ class LiftoffCompiler {
DEBUG_CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableSigIds, kSystemPointerSize,
+ pinned);
// Shift {index} by 2 (multiply by 4) to represent kInt32Size items.
STATIC_ASSERT((1 << 2) == kInt32Size);
__ emit_i32_shli(index, index, 2);
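
The signature check reads a 32-bit canonical signature id from {instance->ift_sig_ids[index]}, so the dynamic index has to be turned into a byte offset first; the STATIC_ASSERT ties the shift amount of 2 to kInt32Size. A small sketch of just that offset arithmetic, where the array is a hypothetical stand-in for the instance field:

    // Offset math only; {ift_sig_ids} stands in for the real instance field.
    #include <cstdint>

    uint32_t SigIdAt(const uint32_t* ift_sig_ids, uint32_t index) {
      constexpr uint32_t kInt32Size = sizeof(uint32_t);
      static_assert((1 << 2) == kInt32Size, "shift by 2 == multiply by kInt32Size");
      uint32_t byte_offset = index << 2;  // what emit_i32_shli(index, index, 2) computes
      return ift_sig_ids[byte_offset / kInt32Size];  // 32-bit load at that offset
    }
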
@@ -4526,8 +5067,8 @@ class LiftoffCompiler {
Label* sig_mismatch_label = AddOutOfLineTrap(
decoder->position(), WasmCode::kThrowWasmTrapFuncSigMismatch);
- __ emit_cond_jump(kUnequal, sig_mismatch_label,
- LiftoffAssembler::kWasmIntPtr, scratch, tmp_const);
+ __ emit_cond_jump(kUnequal, sig_mismatch_label, LiftoffAssembler::kIntPtr,
+ scratch, tmp_const);
// At this point {index} has already been multiplied by 4.
DEBUG_CODE_COMMENT("Execute indirect call");
@@ -4539,7 +5080,7 @@ class LiftoffCompiler {
// At this point {index} has already been multiplied by kTaggedSize.
// Load the instance from {instance->ift_instances[key]}
- LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs);
+ LOAD_TAGGED_PTR_INSTANCE_FIELD(table, IndirectFunctionTableRefs, pinned);
__ LoadTaggedPointer(tmp_const, table, index,
ObjectAccess::ElementOffsetInTaggedFixedArray(0),
pinned);
@@ -4554,8 +5095,8 @@ class LiftoffCompiler {
Register* explicit_instance = &tmp_const;
// Load the target from {instance->ift_targets[key]}
- LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets,
- kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(table, IndirectFunctionTableTargets, kSystemPointerSize,
+ pinned);
__ Load(LiftoffRegister(scratch), table, index, 0, kPointerLoadType,
pinned);
@@ -4565,7 +5106,7 @@ class LiftoffCompiler {
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
Register target = scratch;
- __ PrepareCall(imm.sig, call_descriptor, &target, explicit_instance);
+ __ PrepareCall(sig, call_descriptor, &target, explicit_instance);
if (call_kind == kReturnCall) {
__ PrepareTailCall(
static_cast<int>(call_descriptor->StackParameterCount()),
@@ -4575,17 +5116,225 @@ class LiftoffCompiler {
} else {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
- __ CallIndirect(imm.sig, call_descriptor, target);
+ __ CallIndirect(sig, call_descriptor, target);
}
DefineSafepoint();
RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
- __ FinishCall(imm.sig, call_descriptor);
+ __ FinishCall(sig, call_descriptor);
+ }
+
+ void CallRef(FullDecoder* decoder, ValueType func_ref_type,
+ const FunctionSig* type_sig, CallKind call_kind) {
+ ValueKindSig* sig = MakeKindSig(compilation_zone_, type_sig);
+ for (ValueKind ret : sig->returns()) {
+ if (!CheckSupportedType(decoder, ret, "return")) return;
+ }
+ compiler::CallDescriptor* call_descriptor =
+ compiler::GetWasmCallDescriptor(compilation_zone_, type_sig);
+ call_descriptor =
+ GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
+
+ // Since this is a call instruction, we'll have to spill everything later
+ // anyway; do it right away so that the register state tracking doesn't
+ // get confused by the conditional builtin call below.
+ __ SpillAllRegisters();
+
+ // We limit ourselves to four registers:
+ // (1) func_data, initially reused for func_ref.
+ // (2) instance, initially used as temp.
+ // (3) target, initially used as temp.
+ // (4) temp.
+ LiftoffRegList pinned;
+ LiftoffRegister func_ref = pinned.set(__ PopToModifiableRegister(pinned));
+ MaybeEmitNullCheck(decoder, func_ref.gp(), pinned, func_ref_type);
+ LiftoffRegister instance = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+
+ LiftoffRegister func_data = func_ref;
+ __ LoadTaggedPointer(
+ func_data.gp(), func_ref.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(JSFunction::kSharedFunctionInfoOffset),
+ pinned);
+ __ LoadTaggedPointer(
+ func_data.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
+ pinned);
+
+ LiftoffRegister data_type = instance;
+ __ LoadMap(data_type.gp(), func_data.gp());
+ __ Load(data_type, data_type.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
+ LoadType::kI32Load16U, pinned);
+
+ Label is_js_function, perform_call;
+ __ emit_i32_cond_jumpi(kEqual, &is_js_function, data_type.gp(),
+ WASM_JS_FUNCTION_DATA_TYPE);
+ // End of {data_type}'s live range.
+
+ {
+ // Call to a WasmExportedFunction.
+
+ LiftoffRegister callee_instance = instance;
+ __ LoadTaggedPointer(callee_instance.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kInstanceOffset),
+ pinned);
+ LiftoffRegister func_index = target;
+ __ LoadTaggedSignedAsInt32(
+ func_index, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kFunctionIndexOffset),
+ pinned);
+ LiftoffRegister imported_function_refs = temp;
+ __ LoadTaggedPointer(imported_function_refs.gp(), callee_instance.gp(),
+ no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionRefsOffset),
+ pinned);
+ // We overwrite {imported_function_refs} here, at the cost of having
+ // to reload it later, because we don't have more registers on ia32.
+ LiftoffRegister imported_functions_num = imported_function_refs;
+ __ LoadFixedArrayLengthAsInt32(imported_functions_num,
+ imported_function_refs.gp(), pinned);
+
+ Label imported;
+ __ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
+ imported_functions_num.gp());
+
+ {
+ // Function locally defined in module.
+
+ // {func_index} is invalid from here on.
+ LiftoffRegister jump_table_start = target;
+ __ Load(jump_table_start, callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kJumpTableStartOffset),
+ kPointerLoadType, pinned);
+ LiftoffRegister jump_table_offset = temp;
+ __ LoadTaggedSignedAsInt32(
+ jump_table_offset, func_data.gp(),
+ wasm::ObjectAccess::ToTagged(
+ WasmExportedFunctionData::kJumpTableOffsetOffset),
+ pinned);
+ __ emit_ptrsize_add(target.gp(), jump_table_start.gp(),
+ jump_table_offset.gp());
+ __ emit_jump(&perform_call);
+ }
+
+ {
+ // Function imported to module.
+ __ bind(&imported);
+
+ LiftoffRegister imported_function_targets = temp;
+ __ Load(imported_function_targets, callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionTargetsOffset),
+ kPointerLoadType, pinned);
+ // {callee_instance} is invalid from here on.
+ LiftoffRegister imported_instance = instance;
+ // Scale {func_index} to kTaggedSize.
+ __ emit_i32_shli(func_index.gp(), func_index.gp(), kTaggedSizeLog2);
+ // {func_data} is invalid from here on.
+ imported_function_refs = func_data;
+ __ LoadTaggedPointer(
+ imported_function_refs.gp(), callee_instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kImportedFunctionRefsOffset),
+ pinned);
+ __ LoadTaggedPointer(
+ imported_instance.gp(), imported_function_refs.gp(),
+ func_index.gp(),
+ wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0), pinned);
+ // Scale {func_index} to kSystemPointerSize.
+ if (kSystemPointerSize == kTaggedSize * 2) {
+ __ emit_i32_add(func_index.gp(), func_index.gp(), func_index.gp());
+ } else {
+ DCHECK_EQ(kSystemPointerSize, kTaggedSize);
+ }
+ // This overwrites the contents of {func_index}, which we don't need
+ // any more.
+ __ Load(target, imported_function_targets.gp(), func_index.gp(), 0,
+ kPointerLoadType, pinned);
+ __ emit_jump(&perform_call);
+ }
+ }
+
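
In the imported-function path just above, {func_index} is first scaled by kTaggedSizeLog2 so it can index the tagged ImportedFunctionRefs array, and then doubled once more when system pointers are twice as wide as tagged values, so the same register can also index the pointer-sized ImportedFunctionTargets array. A compilable sketch of that arithmetic, assuming pointer compression (kTaggedSize == 4, kSystemPointerSize == 8); without compression the second step is a no-op:

    // Assumed sizes for illustration; both constants are stand-ins here.
    #include <cstdint>

    uint32_t ScaleFuncIndex(uint32_t func_index) {
      constexpr uint32_t kTaggedSizeLog2 = 2;            // kTaggedSize == 4
      constexpr uint32_t kTaggedSize = 1u << kTaggedSizeLog2;
      constexpr uint32_t kSystemPointerSize = 8;
      uint32_t offset = func_index << kTaggedSizeLog2;   // tagged-array byte offset
      if (kSystemPointerSize == kTaggedSize * 2) {
        offset += offset;  // same effect as emit_i32_add(idx, idx, idx)
      }
      return offset;       // byte offset into the pointer-sized targets array
    }
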
+ {
+ // Call to a WasmJSFunction. The call target is
+ // function_data->wasm_to_js_wrapper_code()->instruction_start().
+ // The instance_node is the pair
+ // (current WasmInstanceObject, function_data->callable()).
+ __ bind(&is_js_function);
+
+ LiftoffRegister callable = temp;
+ __ LoadTaggedPointer(
+ callable.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset),
+ pinned);
+
+ // Preserve {func_data} across the call.
+ LiftoffRegList saved_regs = LiftoffRegList::ForRegs(func_data);
+ __ PushRegisters(saved_regs);
+
+ WasmCode::RuntimeStubId builtin = WasmCode::kWasmAllocatePair;
+ compiler::CallDescriptor* builtin_call_descriptor =
+ GetBuiltinCallDescriptor<WasmAllocatePairDescriptor>(
+ compilation_zone_);
+ ValueKind sig_reps[] = {kOptRef, kOptRef, kOptRef};
+ ValueKindSig builtin_sig(1, 2, sig_reps);
+ LiftoffRegister current_instance = instance;
+ __ FillInstanceInto(current_instance.gp());
+ LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
+ LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
+ __ PrepareBuiltinCall(&builtin_sig, builtin_call_descriptor,
+ {instance_var, callable_var});
+
+ __ CallRuntimeStub(builtin);
+ DefineSafepoint();
+ if (instance.gp() != kReturnRegister0) {
+ __ Move(instance.gp(), kReturnRegister0, LiftoffAssembler::kIntPtr);
+ }
+
+ // Restore {func_data}, which we saved across the call.
+ __ PopRegisters(saved_regs);
+
+ LiftoffRegister wrapper_code = target;
+ __ LoadTaggedPointer(wrapper_code.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
+ pinned);
+ __ emit_ptrsize_addi(target.gp(), wrapper_code.gp(),
+ wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
+ // Fall through to {perform_call}.
+ }
+
+ __ bind(&perform_call);
+ // Now the call target is in {target}, and the right instance object
+ // is in {instance}.
+ Register target_reg = target.gp();
+ Register instance_reg = instance.gp();
+ __ PrepareCall(sig, call_descriptor, &target_reg, &instance_reg);
+ if (call_kind == kReturnCall) {
+ __ PrepareTailCall(
+ static_cast<int>(call_descriptor->StackParameterCount()),
+ static_cast<int>(
+ call_descriptor->GetStackParameterDelta(descriptor_)));
+ __ TailCallIndirect(target_reg);
+ } else {
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), true);
+ __ CallIndirect(sig, call_descriptor, target_reg);
+ }
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(DebugSideTableBuilder::kDidSpill);
+ __ FinishCall(sig, call_descriptor);
}
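
Stripped of the register juggling, the call_ref lowering above resolves the call target and the instance argument in one of three ways: a locally defined exported function jumps through its instance's jump table, an imported one reads the imported-function targets/refs tables, and a WasmJSFunction calls its wasm-to-js wrapper with a freshly allocated (instance, callable) pair from the WasmAllocatePair builtin. A condensed, compilable sketch of that dispatch; every type and field below is a simplified stand-in, not the real object layout:

    #include <cstdint>

    struct CallTarget { uintptr_t code; uintptr_t instance; };

    // Pre-resolved stand-ins for what the generated code loads from the
    // function data / callee instance at runtime.
    struct FuncDataView {
      bool is_js_function;
      int func_index;
      int num_imported_functions;
      uintptr_t callee_instance;
      uintptr_t jump_table_start, jump_table_offset;   // local function path
      uintptr_t imported_target, imported_instance;    // imported function path
      uintptr_t wrapper_code_entry;                    // WasmJSFunction path
      uintptr_t instance_callable_pair;                // result of WasmAllocatePair
    };

    CallTarget ResolveCallRef(const FuncDataView& d) {
      if (d.is_js_function) {
        return {d.wrapper_code_entry, d.instance_callable_pair};
      }
      if (d.func_index >= d.num_imported_functions) {
        // Locally defined in the callee module: jump-table entry.
        return {d.jump_table_start + d.jump_table_offset, d.callee_instance};
      }
      // Imported into the callee module: import tables provide both values.
      return {d.imported_target, d.imported_instance};
    }
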
void LoadNullValue(Register null, LiftoffRegList pinned) {
- LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize);
+ LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
__ LoadTaggedPointer(null, null, no_reg,
IsolateData::root_slot_offset(RootIndex::kNullValue),
pinned);
@@ -4598,7 +5347,7 @@ class LiftoffCompiler {
decoder->position(), WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
- __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, type, object,
+ __ emit_cond_jump(LiftoffCondition::kEqual, trap_label, kOptRef, object,
null.gp());
}
@@ -4611,8 +5360,8 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(WasmArray::kLengthOffset);
__ Load(length, array.gp(), no_reg, kLengthOffset, LoadType::kI32Load,
pinned);
- __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label,
- kWasmI32, index.gp(), length.gp());
+ __ emit_cond_jump(LiftoffCondition::kUnsignedGreaterEqual, trap_label, kI32,
+ index.gp(), length.gp());
}
int StructFieldOffset(const StructType* struct_type, int field_index) {
@@ -4621,52 +5370,53 @@ class LiftoffCompiler {
}
void LoadObjectField(LiftoffRegister dst, Register src, Register offset_reg,
- int offset, ValueType type, bool is_signed,
+ int offset, ValueKind kind, bool is_signed,
LiftoffRegList pinned) {
- if (type.is_reference_type()) {
+ if (is_reference_type(kind)) {
__ LoadTaggedPointer(dst.gp(), src, offset_reg, offset, pinned);
} else {
- // Primitive type.
- LoadType load_type = LoadType::ForValueType(type, is_signed);
+ // Primitive kind.
+ LoadType load_type = LoadType::ForValueKind(kind, is_signed);
__ Load(dst, src, offset_reg, offset, load_type, pinned);
}
}
void StoreObjectField(Register obj, Register offset_reg, int offset,
LiftoffRegister value, LiftoffRegList pinned,
- ValueType type) {
- if (type.is_reference_type()) {
+ ValueKind kind) {
+ if (is_reference_type(kind)) {
__ StoreTaggedPointer(obj, offset_reg, offset, value, pinned);
} else {
- // Primitive type.
- StoreType store_type = StoreType::ForValueType(type);
+ // Primitive kind.
+ StoreType store_type = StoreType::ForValueKind(kind);
__ Store(obj, offset_reg, offset, value, store_type, pinned);
}
}
- void SetDefaultValue(LiftoffRegister reg, ValueType type,
+ void SetDefaultValue(LiftoffRegister reg, ValueKind kind,
LiftoffRegList pinned) {
- DCHECK(type.is_defaultable());
- switch (type.kind()) {
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kI32:
+ DCHECK(is_defaultable(kind));
+ switch (kind) {
+ case kI8:
+ case kI16:
+ case kI32:
return __ LoadConstant(reg, WasmValue(int32_t{0}));
- case ValueType::kI64:
+ case kI64:
return __ LoadConstant(reg, WasmValue(int64_t{0}));
- case ValueType::kF32:
+ case kF32:
return __ LoadConstant(reg, WasmValue(float{0.0}));
- case ValueType::kF64:
+ case kF64:
return __ LoadConstant(reg, WasmValue(double{0.0}));
- case ValueType::kS128:
+ case kS128:
DCHECK(CpuFeatures::SupportsWasmSimd128());
return __ emit_s128_xor(reg, reg, reg);
- case ValueType::kOptRef:
+ case kOptRef:
return LoadNullValue(reg.gp(), pinned);
- case ValueType::kRtt:
- case ValueType::kStmt:
- case ValueType::kBottom:
- case ValueType::kRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kStmt:
+ case kBottom:
+ case kRef:
UNREACHABLE();
}
}
@@ -4726,17 +5476,17 @@ class LiftoffCompiler {
// breakpoint, and a pointer after the list of breakpoints as end marker.
// A single breakpoint at offset 0 indicates that we should prepare the
// function for stepping by flooding it with breakpoints.
- int* next_breakpoint_ptr_ = nullptr;
- int* next_breakpoint_end_ = nullptr;
+ const int* next_breakpoint_ptr_ = nullptr;
+ const int* next_breakpoint_end_ = nullptr;
// Introduce a dead breakpoint to ensure that the calculation of the return
// address in OSR is correct.
int dead_breakpoint_ = 0;
- // Remember whether the "hook on function call" has already been checked.
- // This happens at the first breakable opcode in the function (if compiling
- // for debugging).
- bool checked_hook_on_function_call_ = false;
+  // Remember whether we already did the function-entry break checks (for "hook
+  // on function call" and "break on entry" a.k.a. instrumentation breakpoint);
+  // this happens at the first breakable opcode (if compiling for debugging).
+ bool did_function_entry_break_checks_ = false;
bool has_outstanding_op() const {
return outstanding_op_ != kNoOutstandingOp;
@@ -4758,11 +5508,21 @@ class LiftoffCompiler {
}
void DefineSafepoint() {
- Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(
- &asm_, Safepoint::kNoLazyDeopt);
+ Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
__ cache_state()->DefineSafepoint(safepoint);
}
+ Register LoadInstanceIntoRegister(LiftoffRegList pinned, Register fallback) {
+ Register instance = __ cache_state()->cached_instance;
+ if (instance == no_reg) {
+ instance = __ cache_state()->TrySetCachedInstanceRegister(
+ pinned | LiftoffRegList::ForRegs(fallback));
+ if (instance == no_reg) instance = fallback;
+ __ LoadInstanceFromFrame(instance);
+ }
+ return instance;
+ }
+
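
The new LoadInstanceIntoRegister helper backs the extra {pinned} argument that LOAD_INSTANCE_FIELD and LOAD_TAGGED_PTR_INSTANCE_FIELD now take: reuse the cached instance register when there is one, otherwise try to establish a cache entry outside the pinned set, and only fall back to the caller-supplied register plus a frame reload when everything is taken. A standalone model of that decision ladder, with registers reduced to plain ints:

    // Hypothetical stand-in for the caching logic; registers are just ints
    // here and kNoReg plays the role of no_reg.
    #include <set>

    constexpr int kNoReg = -1;

    struct CacheState {
      int cached_instance = kNoReg;
      // Try to pick a free register outside {pinned} to cache the instance in.
      int TrySetCachedInstanceRegister(const std::set<int>& pinned) {
        for (int reg = 0; reg < 8; ++reg) {
          if (pinned.count(reg) == 0) return cached_instance = reg;
        }
        return kNoReg;
      }
    };

    // Mirrors the ladder above: cached register first, then a freshly cached
    // one, then the caller-provided fallback; a frame load only happens when
    // there was no cached copy to begin with.
    int LoadInstanceIntoRegister(CacheState* state, std::set<int> pinned,
                                 int fallback) {
      int instance = state->cached_instance;
      if (instance == kNoReg) {
        pinned.insert(fallback);
        instance = state->TrySetCachedInstanceRegister(pinned);
        if (instance == kNoReg) instance = fallback;
        // ... here the real code emits LoadInstanceFromFrame(instance) ...
      }
      return instance;
    }
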
DISALLOW_IMPLICIT_CONSTRUCTORS(LiftoffCompiler);
};
@@ -4771,7 +5531,7 @@ class LiftoffCompiler {
WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator* allocator, CompilationEnv* env,
const FunctionBody& func_body, int func_index, ForDebugging for_debugging,
- Counters* counters, WasmFeatures* detected, Vector<int> breakpoints,
+ Counters* counters, WasmFeatures* detected, Vector<const int> breakpoints,
std::unique_ptr<DebugSideTable>* debug_sidetable, int dead_breakpoint) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
@@ -4787,8 +5547,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<wasm::WasmInstructionBuffer> instruction_buffer =
wasm::WasmInstructionBuffer::New(128 + code_size_estimate * 4 / 3);
std::unique_ptr<DebugSideTableBuilder> debug_sidetable_builder;
- // If we are emitting breakpoints, we should also emit the debug side table.
- DCHECK_IMPLIES(!breakpoints.empty(), debug_sidetable != nullptr);
if (debug_sidetable) {
debug_sidetable_builder = std::make_unique<DebugSideTableBuilder>();
}
@@ -4811,11 +5569,6 @@ WasmCompilationResult ExecuteLiftoffCompilation(
// Register the bailout reason (can also be {kSuccess}).
counters->liftoff_bailout_reasons()->AddSample(
static_cast<int>(compiler->bailout_reason()));
- if (compiler->did_bailout()) {
- counters->liftoff_unsupported_functions()->Increment();
- } else {
- counters->liftoff_compiled_functions()->Increment();
- }
}
if (compiler->did_bailout()) return WasmCompilationResult{};
@@ -4839,17 +5592,32 @@ WasmCompilationResult ExecuteLiftoffCompilation(
}
std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
- AccountingAllocator* allocator, CompilationEnv* env,
- const FunctionBody& func_body, int func_index) {
+ const WasmCode* code) {
+ auto* native_module = code->native_module();
+ auto* function = &native_module->module()->functions[code->index()];
+ ModuleWireBytes wire_bytes{native_module->wire_bytes()};
+ Vector<const byte> function_bytes = wire_bytes.GetFunctionBytes(function);
+ CompilationEnv env = native_module->CreateCompilationEnv();
+ FunctionBody func_body{function->sig, 0, function_bytes.begin(),
+ function_bytes.end()};
+
+ AccountingAllocator* allocator = native_module->engine()->allocator();
Zone zone(allocator, "LiftoffDebugSideTableZone");
- auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
+ auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, function->sig);
DebugSideTableBuilder debug_sidetable_builder;
WasmFeatures detected;
+ constexpr int kSteppingBreakpoints[] = {0};
+ DCHECK(code->for_debugging() == kForDebugging ||
+ code->for_debugging() == kForStepping);
+ Vector<const int> breakpoints = code->for_debugging() == kForStepping
+ ? ArrayVector(kSteppingBreakpoints)
+ : Vector<const int>{};
WasmFullDecoder<Decoder::kBooleanValidation, LiftoffCompiler> decoder(
- &zone, env->module, env->enabled_features, &detected, func_body,
- call_descriptor, env, &zone,
+ &zone, native_module->module(), env.enabled_features, &detected,
+ func_body, call_descriptor, &env, &zone,
NewAssemblerBuffer(AssemblerBase::kDefaultBufferSize),
- &debug_sidetable_builder, kForDebugging, func_index);
+ &debug_sidetable_builder, code->for_debugging(), code->index(),
+ breakpoints);
decoder.Decode();
DCHECK(decoder.ok());
DCHECK(!decoder.interface().did_bailout());
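
GenerateLiftoffDebugSideTable now derives the module, wire bytes, compilation environment, and debugging mode from the WasmCode object itself instead of taking allocator/env/function-body arguments, and it replays stepping code with a single breakpoint at offset 0. A hedged usage sketch, assuming {code} points at a function compiled with kForDebugging or kForStepping as the DCHECK requires:

    // Usage sketch only; {code} is assumed to be a debug-compiled WasmCode*.
    std::unique_ptr<wasm::DebugSideTable> side_table =
        wasm::GenerateLiftoffDebugSideTable(code);
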
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.h b/deps/v8/src/wasm/baseline/liftoff-compiler.h
index 177ca7b78f..6987c2e779 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.h
@@ -56,11 +56,11 @@ enum LiftoffBailoutReason : int8_t {
V8_EXPORT_PRIVATE WasmCompilationResult ExecuteLiftoffCompilation(
AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index,
ForDebugging, Counters*, WasmFeatures* detected_features,
- Vector<int> breakpoints = {}, std::unique_ptr<DebugSideTable>* = nullptr,
- int dead_breakpoint = 0);
+ Vector<const int> breakpoints = {},
+ std::unique_ptr<DebugSideTable>* = nullptr, int dead_breakpoint = 0);
V8_EXPORT_PRIVATE std::unique_ptr<DebugSideTable> GenerateLiftoffDebugSideTable(
- AccountingAllocator*, CompilationEnv*, const FunctionBody&, int func_index);
+ const WasmCode*);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/baseline/liftoff-register.h b/deps/v8/src/wasm/baseline/liftoff-register.h
index bd2e6ed4c2..bb27b99dc2 100644
--- a/deps/v8/src/wasm/baseline/liftoff-register.h
+++ b/deps/v8/src/wasm/baseline/liftoff-register.h
@@ -45,40 +45,37 @@ static_assert(kNeedS128RegPair == (kFpRegPair != kNoReg),
enum RegPairHalf : uint8_t { kLowWord = 0, kHighWord = 1 };
-static inline constexpr bool needs_gp_reg_pair(ValueType type) {
- return kNeedI64RegPair && type == kWasmI64;
+static inline constexpr bool needs_gp_reg_pair(ValueKind kind) {
+ return kNeedI64RegPair && kind == kI64;
}
-static inline constexpr bool needs_fp_reg_pair(ValueType type) {
- return kNeedS128RegPair && type == kWasmS128;
+static inline constexpr bool needs_fp_reg_pair(ValueKind kind) {
+ return kNeedS128RegPair && kind == kS128;
}
-static inline constexpr RegClass reg_class_for(ValueType::Kind kind) {
+static inline constexpr RegClass reg_class_for(ValueKind kind) {
switch (kind) {
- case ValueType::kF32:
- case ValueType::kF64:
+ case kF32:
+ case kF64:
return kFpReg;
- case ValueType::kI8:
- case ValueType::kI16:
- case ValueType::kI32:
+ case kI8:
+ case kI16:
+ case kI32:
return kGpReg;
- case ValueType::kI64:
+ case kI64:
return kNeedI64RegPair ? kGpRegPair : kGpReg;
- case ValueType::kS128:
+ case kS128:
return kNeedS128RegPair ? kFpRegPair : kFpReg;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
return kGpReg;
default:
- return kNoReg; // unsupported type
+ return kNoReg; // unsupported kind
}
}
-static inline constexpr RegClass reg_class_for(ValueType type) {
- return reg_class_for(type.kind());
-}
-
// Description of LiftoffRegister code encoding.
// This example uses the ARM architecture, which as of writing has:
// - 9 GP registers, requiring 4 bits
@@ -191,9 +188,9 @@ class LiftoffRegister {
// Shifts the register code depending on the type before converting to a
// LiftoffRegister.
- static LiftoffRegister from_external_code(RegClass rc, ValueType type,
+ static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) {
- if (!kSimpleFPAliasing && type == kWasmF32) {
+ if (!kSimpleFPAliasing && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
@@ -201,7 +198,7 @@ class LiftoffRegister {
DCHECK_EQ(0, code % 2);
return LiftoffRegister::from_code(rc, code >> 1);
}
- if (kNeedS128RegPair && type == kWasmS128) {
+ if (kNeedS128RegPair && kind == kS128) {
// Similarly for double registers and SIMD registers, the SIMD code
// needs to be doubled to pass the f64 code to Liftoff.
return LiftoffRegister::ForFpPair(DoubleRegister::from_code(code << 1));
@@ -376,6 +373,10 @@ class LiftoffRegList {
}
return reg;
}
+ Register clear(Register reg) { return clear(LiftoffRegister{reg}).gp(); }
+ DoubleRegister clear(DoubleRegister reg) {
+ return clear(LiftoffRegister{reg}).fp();
+ }
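
These overloads mirror the existing has(Register)/has(DoubleRegister) pair, so call sites can drop a plain machine register from a list without spelling out the LiftoffRegister wrapper; a hypothetical call site:

    // Hypothetical call site; {pinned} is a LiftoffRegList, {tmp} a Register.
    pinned.clear(tmp);  // instead of pinned.clear(LiftoffRegister{tmp});
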
bool has(LiftoffRegister reg) const {
if (reg.is_pair()) {
@@ -384,8 +385,8 @@ class LiftoffRegList {
}
return (regs_ & (storage_t{1} << reg.liftoff_code())) != 0;
}
- bool has(Register reg) const { return has(LiftoffRegister(reg)); }
- bool has(DoubleRegister reg) const { return has(LiftoffRegister(reg)); }
+ bool has(Register reg) const { return has(LiftoffRegister{reg}); }
+ bool has(DoubleRegister reg) const { return has(LiftoffRegister{reg}); }
constexpr bool is_empty() const { return regs_ == 0; }
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index c12eae4c39..94ba6f783e 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -84,25 +84,26 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
- int32_t offset, ValueType type) {
+ int32_t offset, ValueKind kind) {
MemOperand src(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
assm->lw(dst.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
assm->lw(dst.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->lw(dst.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case ValueType::kF32:
+ case kF32:
assm->lwc1(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Ldc1(dst.fp(), src);
break;
default:
@@ -111,25 +112,26 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
MemOperand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Usw(src.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
assm->Usw(src.low_gp(),
MemOperand(base, offset + liftoff::kLowWordOffset));
assm->Usw(src.high_gp(),
MemOperand(base, offset + liftoff::kHighWordOffset));
break;
- case ValueType::kF32:
+ case kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case ValueType::kF64:
+ case kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
default:
@@ -137,25 +139,25 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->push(reg.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->Push(reg.high_gp(), reg.low_gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->addiu(sp, sp, -sizeof(float));
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kF64:
+ case kF64:
assm->addiu(sp, sp, -sizeof(double));
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kOptRef:
- assm->push(reg.gp());
- break;
default:
UNREACHABLE();
}
@@ -363,26 +365,26 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64: {
+ case kI64: {
DCHECK(RelocInfo::IsNone(rmode));
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -390,10 +392,10 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
TurboAssembler::li(reg.high_gp(), Operand(high_word));
break;
}
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -401,17 +403,30 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
- int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
- DCHECK_EQ(4, size);
- lw(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int32_t offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ lb(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ lw(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
int32_t offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ lw(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -435,7 +450,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt32Size);
Register dst = no_reg;
if (offset_reg != no_reg) {
@@ -445,6 +461,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
: MemOperand(dst_addr, offset_imm);
Sw(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
// The write barrier.
Label write_barrier;
Label exit;
@@ -452,12 +471,12 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(dst_addr, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
&write_barrier);
- Branch(USE_DELAY_SLOT, &exit);
+ Branch(&exit);
bind(&write_barrier);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
- Addu(scratch, dst_addr, offset_imm);
+ Addu(scratch, dst_op.rm(), dst_op.offset());
CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
wasm::WasmCode::kRecordWrite);
bind(&exit);
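
StoreTaggedPointer now takes a SkipWriteBarrier flag and, when the barrier is not skipped, runs the usual generational checks before calling the RecordWrite stub: the store is only recorded if the destination page has "pointers from here are interesting" set, the stored value is not a Smi, and the value's page has "pointers to here are interesting" set. A sketch of that decision logic reduced to plain booleans, where the page-flag parameters stand in for CheckPageFlag:

    // Decision logic of the write barrier above, as plain booleans.
    bool NeedsRecordWrite(bool skip_write_barrier,
                          bool dst_pointers_from_here_interesting,
                          bool value_is_smi,
                          bool value_pointers_to_here_interesting) {
      if (skip_write_barrier) return false;                      // new fast path
      if (!dst_pointers_from_here_interesting) return false;     // e.g. old-to-old store
      if (value_is_smi) return false;                            // Smis are not pointers
      return value_pointers_to_here_interesting;                 // e.g. value in new space
    }
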
@@ -678,60 +697,61 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Load(this, dst, fp, offset, type);
+ liftoff::Load(this, dst, fp, offset, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Store(this, fp, offset, src, type);
+ liftoff::Store(this, fp, offset, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- liftoff::Load(this, dst, sp, offset, type);
+ ValueKind kind) {
+ liftoff::Load(this, dst, sp, offset, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
TurboAssembler::mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
TurboAssembler::Move(dst, src);
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
sw(reg.gp(), dst);
break;
- case ValueType::kI64:
+ case kI64:
sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case ValueType::kF32:
+ case kF32:
swc1(reg.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
default:
@@ -743,13 +763,15 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32: {
+ case kI32:
+ case kRef:
+ case kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
- case ValueType::kI64: {
+ case kI64: {
LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
@@ -768,22 +790,22 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ switch (kind) {
+ case kI32:
+ case kRef:
+ case kOptRef:
lw(reg.gp(), src);
break;
- case ValueType::kI64:
+ case kI64:
lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord));
lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord));
break;
- case ValueType::kF32:
+ case kF32:
lwc1(reg.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
default:
@@ -1488,15 +1510,15 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK(type == kWasmI32 ||
- (type.is_reference_type() &&
+ DCHECK(kind == kI32 ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1691,7 +1713,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1712,6 +1734,13 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
bailout(kSimd, "load extend and load splat unimplemented");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
@@ -1792,6 +1821,12 @@ SIMD_BINOP(i64x2_extmul_high_i32x4_u, ilvl_w, dotp_u_d)
#undef SIMD_BINOP
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_eq");
@@ -1902,6 +1937,21 @@ void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f32x4_le");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_f64x2_eq");
@@ -1964,9 +2014,9 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v8x16_anytrue");
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
}
void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
@@ -2074,16 +2124,16 @@ void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_max_u");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i16x8_neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_v16x8_alltrue");
@@ -2189,16 +2239,21 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_max_u");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "emit_v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_v32x4_alltrue");
@@ -2286,11 +2341,26 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i64x2_bitmask");
@@ -2343,6 +2413,16 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i64x2_mul");
}
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_abs");
@@ -2493,6 +2573,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f64x2_pmax");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i32x4_sconvert_f32x4");
@@ -2503,6 +2598,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_uconvert_f32x4");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_f32x4_sconvert_i32x4");
@@ -2513,6 +2618,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "emit_f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2577,6 +2687,26 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2782,17 +2912,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
addiu(sp, sp, -stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2818,8 +2948,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, sp, 0, out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, sp, 0, out_argument_kind);
}
addiu(sp, sp, stack_bytes);
@@ -2833,7 +2963,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -2873,7 +3003,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack: {
- if (src.type().kind() == ValueType::kF64) {
+ if (src.kind() == kF64) {
DCHECK_EQ(kLowWord, slot.half_);
asm_->lw(kScratchReg,
liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord));
@@ -2885,12 +3015,12 @@ void LiftoffStackSlots::Construct() {
break;
}
case LiftoffAssembler::VarState::kRegister:
- if (src.type().kind() == ValueType::kI64) {
+ if (src.kind() == kI64) {
liftoff::push(
asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(),
- kWasmI32);
+ kI32);
} else {
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
}
break;
case LiftoffAssembler::VarState::kIntConst: {
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index b97b423e20..deb54995b1 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -6,6 +6,7 @@
#define V8_WASM_BASELINE_MIPS64_LIFTOFF_ASSEMBLER_MIPS64_H_
#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
@@ -92,24 +93,25 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->Lw(dst.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Ld(dst.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
assm->Lwc1(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Ldc1(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->ld_b(dst.fp().toW(), src);
break;
default:
@@ -118,25 +120,26 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
}
inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
- LiftoffRegister src, ValueType type) {
+ LiftoffRegister src, ValueKind kind) {
MemOperand dst(base, offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
assm->Usw(src.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->Usd(src.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
assm->Uswc1(src.fp(), dst, t8);
break;
- case ValueType::kF64:
+ case kF64:
assm->Usdc1(src.fp(), dst, t8);
break;
- case ValueType::kS128:
+ case kS128:
assm->st_b(src.fp().toW(), dst);
break;
default:
@@ -144,24 +147,27 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->sw(reg.gp(), MemOperand(sp, 0));
break;
- case ValueType::kI64:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
assm->push(reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->swc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kF64:
+ case kF64:
assm->daddiu(sp, sp, -kSystemPointerSize);
assm->Sdc1(reg.fp(), MemOperand(sp, 0));
break;
- case ValueType::kS128:
+ case kS128:
assm->daddiu(sp, sp, -kSystemPointerSize * 2);
assm->st_b(reg.fp().toW(), MemOperand(sp, 0));
break;
@@ -346,32 +352,32 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.kind() == ValueType::kS128 || type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
break;
- case ValueType::kI64:
+ case kI64:
TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
break;
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -379,21 +385,33 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
- int size) {
- DCHECK_LE(0, offset);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
- DCHECK(size == 4 || size == 8);
- if (size == 4) {
- Lw(dst, MemOperand(dst, offset));
- } else {
- Ld(dst, MemOperand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Lb(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Lw(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
int32_t offset) {
- LoadFromInstance(dst, offset, kTaggedSize);
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -417,24 +435,27 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
STATIC_ASSERT(kTaggedSize == kInt64Size);
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
+ if (skip_write_barrier) return;
+
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
&write_barrier);
- Branch(USE_DELAY_SLOT, &exit);
+ Branch(&exit);
bind(&write_barrier);
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
- Daddu(scratch, dst_addr, offset_imm);
+ Daddu(scratch, dst_op.rm(), dst_op.offset());
CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
wasm::WasmCode::kRecordWrite);
bind(&exit);
@@ -605,67 +626,68 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
- liftoff::Store(this, fp, offset, src, type);
+ liftoff::Store(this, fp, offset, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- liftoff::Load(this, dst, MemOperand(sp, offset), type);
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
- Fill(reg, src_offset, type);
- Spill(dst_offset, reg, type);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
// TODO(ksreten): Handle different sizes here.
TurboAssembler::Move(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type != kWasmS128) {
+ if (kind != kS128) {
TurboAssembler::Move(dst, src);
} else {
TurboAssembler::move_v(dst.toW(), src.toW());
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Sw(reg.gp(), dst);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
Sd(reg.gp(), dst);
break;
- case ValueType::kF32:
+ case kF32:
Swc1(reg.fp(), dst);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Sdc1(reg.fp(), dst);
break;
- case ValueType::kS128:
+ case kS128:
TurboAssembler::st_b(reg.fp().toW(), dst);
break;
default:
@@ -677,15 +699,15 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32: {
+ case kI32: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
Sw(tmp.gp(), dst);
break;
}
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef: {
+ case kI64:
+ case kRef:
+ case kOptRef: {
LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
Sd(tmp.gp(), dst);
@@ -698,24 +720,24 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
MemOperand src = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
Lw(reg.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kRef:
- case ValueType::kOptRef:
+ case kI64:
+ case kRef:
+ case kOptRef:
Ld(reg.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
Lwc1(reg.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Ldc1(reg.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
TurboAssembler::ld_b(reg.fp().toW(), src);
break;
default:
@@ -1342,15 +1364,15 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs == no_reg) {
- DCHECK(type == kWasmI32 || type == kWasmI64);
+ DCHECK(kind == kI32 || kind == kI64);
TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
} else {
- DCHECK((type == kWasmI32 || type == kWasmI64) ||
- (type.is_reference_type() &&
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference_type(kind) &&
(liftoff_cond == kEqual || liftoff_cond == kUnequal)));
TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
}
@@ -1527,7 +1549,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -1612,7 +1634,20 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
Register addr, Register offset_reg,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
- bailout(kSimd, "loadlane");
+ MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx);
+ TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op);
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ LoadStoreLaneParams store_params(type.mem_rep(), lane);
+ TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op);
}
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
@@ -1719,6 +1754,24 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32)
#undef SIMD_BINOP
+#define SIMD_BINOP(name1, name2, type) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s, MSAS8)
+SIMD_BINOP(i16x8, i8x16_u, MSAU8)
+SIMD_BINOP(i32x4, i16x8_s, MSAS16)
+SIMD_BINOP(i32x4, i16x8_u, MSAU16)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ mulr_q_h(dst.fp().toW(), src1.fp().toW(), src2.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
ceq_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1832,6 +1885,23 @@ void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
fcle_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ ceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
+ nor_v(dst.fp().toW(), dst.fp().toW(), dst.fp().toW());
+}
+
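The i64x2.ne emitter above synthesizes "not equal" from the available MSA compares: ceq_d produces an all-ones/all-zeros mask per lane, and nor_v of the result with itself inverts it. A scalar sketch of the same identity (illustrative only, not part of the patch):

  #include <cstdint>

  // Per-lane ne(a, b) as the bitwise complement of the eq mask, which is what
  // ceq_d followed by nor_v(x, x, x) computes.
  uint64_t LaneNe(uint64_t a, uint64_t b) {
    uint64_t eq_mask = (a == b) ? ~uint64_t{0} : uint64_t{0};
    return ~eq_mask;
  }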
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ add_a_d(dst.fp().toW(), src.fp().toW(), kSimd128RegZero);
+}
+
void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fceq_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1908,8 +1978,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2032,17 +2102,17 @@ void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
max_u_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ pcnt_b(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
LiftoffRegister src) {
xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
@@ -2167,11 +2237,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
@@ -2276,6 +2341,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
subv_d(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_D);
+}
+
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
srli_d(kSimd128RegZero, src.fp().toW(), 63);
@@ -2335,6 +2405,16 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
mulv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ clt_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ cle_s_d(dst.fp().toW(), rhs.fp().toW(), lhs.fp().toW());
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
bclri_w(dst.fp().toW(), src.fp().toW(), 31);
@@ -2581,6 +2661,27 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bsel_v(dst_msa, lhs_msa, rhs_msa);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
+ slli_d(kSimd128RegZero, kSimd128RegZero, 32);
+ srai_d(kSimd128RegZero, kSimd128RegZero, 32);
+ ffint_s_d(dst.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(kSimd128RegZero, kSimd128RegZero, src.fp().toW());
+ ffint_u_d(dst.fp().toW(), kSimd128RegZero);
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ fexupr_d(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
ftrunc_s_w(dst.fp().toW(), src.fp().toW());
@@ -2591,6 +2692,22 @@ void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
ftrunc_u_w(dst.fp().toW(), src.fp().toW());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ftrunc_s_d(kSimd128ScratchReg, src.fp().toW());
+ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ftrunc_u_d(kSimd128ScratchReg, src.fp().toW());
+ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31);
+ pckev_w(dst.fp().toW(), kSimd128RegZero, kSimd128ScratchReg);
+}
+
void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
LiftoffRegister src) {
ffint_s_w(dst.fp().toW(), src.fp().toW());
@@ -2601,6 +2718,12 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
ffint_u_w(dst.fp().toW(), src.fp().toW());
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fexdo_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2691,6 +2814,32 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
ilvl_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvr_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
+ srai_d(dst.fp().toW(), dst.fp().toW(), 32);
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ilvl_w(kSimd128ScratchReg, src.fp().toW(), src.fp().toW());
+ slli_d(dst.fp().toW(), kSimd128ScratchReg, 32);
+ srai_d(dst.fp().toW(), dst.fp().toW(), 32);
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvr_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ ilvl_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2930,17 +3079,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
Daddu(sp, sp, -stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, sp, arg_bytes, *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -2966,8 +3115,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
}
Daddu(sp, sp, stack_bytes);
@@ -2981,7 +3130,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
Jump(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -3021,7 +3170,7 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- if (src.type() != kWasmS128) {
+ if (src.kind() != kS128) {
asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
asm_->push(kScratchReg);
} else {
@@ -3032,7 +3181,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
break;
case LiftoffAssembler::VarState::kIntConst: {
asm_->li(kScratchReg, Operand(src.i32_const()));
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 1a2e950615..644d392594 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -72,17 +72,17 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return (kind == kS128 || is_reference_type(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
@@ -90,11 +90,18 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
bailout(kUnsupportedArchitecture, "LoadConstant");
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ bailout(kUnsupportedArchitecture, "LoadInstanceFromFrame");
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
bailout(kUnsupportedArchitecture, "LoadFromInstance");
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
}
@@ -117,7 +124,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
bailout(kRefTypes, "GlobalSet");
}
@@ -195,36 +203,36 @@ void LiftoffAssembler::AtomicFence() { sync(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "LoadReturnStackSlot");
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "MoveStackValue");
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Move Register");
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
bailout(kUnsupportedArchitecture, "Move DoubleRegister");
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Spill register");
}
@@ -232,7 +240,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
bailout(kUnsupportedArchitecture, "Spill value");
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
bailout(kUnsupportedArchitecture, "Fill");
}
@@ -520,7 +528,7 @@ void LiftoffAssembler::emit_jump(Register target) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
bailout(kUnsupportedArchitecture, "emit_cond_jump");
}
@@ -566,7 +574,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -590,6 +598,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
bailout(kSimd, "loadlane");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "store lane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -693,6 +708,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "pmax unimplemented");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -813,6 +843,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
@@ -883,6 +918,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
bailout(kSimd, "i64x2_bitmask");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -912,11 +967,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v32x4_alltrue");
@@ -1004,6 +1054,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1038,11 +1098,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v16x8_alltrue");
@@ -1161,6 +1216,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1185,6 +1250,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1199,6 +1270,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
bailout(kSimd, "i8x16_shuffle");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16.popcnt");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -1222,8 +1298,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
bailout(kSimd, "v8x16_anytrue");
}
@@ -1400,6 +1476,26 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
@@ -1491,6 +1587,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4.demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1555,6 +1656,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1588,6 +1699,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16sub");
@@ -1647,10 +1763,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
bailout(kUnsupportedArchitecture, "CallC");
}
@@ -1663,7 +1779,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
new file mode 100644
index 0000000000..2f624f79f5
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -0,0 +1,2516 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
+#define V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16 byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16 byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
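Per the layout comment, spill slots live at negative offsets from fp, so GetStackSlot(kInstanceOffset) resolves to fp - 16, the cached instance slot. A trivial host-side check of that mapping (plain integers standing in for MemOperand; illustrative only):

  #include <cstdint>

  // fp-relative byte address of a Liftoff frame slot, mirroring GetStackSlot.
  constexpr int64_t SlotAddress(int64_t fp, int offset) { return fp - offset; }

  static_assert(SlotAddress(/*fp=*/1024, /*kInstanceOffset=*/16) == 1008,
                "the instance slot sits 16 bytes below the frame pointer");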
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, uintptr_t offset_imm) {
+ if (is_uint31(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->Add64(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, offset_imm);
+ assm->Add64(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->Add64(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
+
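GetMemOp folds the base register, an optional index register and an immediate into one MemOperand, falling back to kScratchReg only when the immediate does not fit in 31 bits. A stand-alone sketch of that case split with plain integers in place of the real Register/MemOperand types (illustrative only):

  #include <cstdint>
  #include <utility>

  // Returns {effective base, displacement} for a base + index + imm access.
  std::pair<uint64_t, int32_t> FoldMemOperand(uint64_t base, uint64_t index,
                                              uint64_t offset_imm) {
    if (offset_imm < (uint64_t{1} << 31)) {
      // Small immediate: keep it as the displacement, fold only the index.
      return {base + index, static_cast<int32_t>(offset_imm)};
    }
    // Large immediate: materialize it (kScratchReg in the real code) and use
    // a zero displacement.
    return {base + index + offset_imm, 0};
  }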
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->Lw(dst.gp(), src);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ assm->Ld(dst.gp(), src);
+ break;
+ case ValueType::kF32:
+ assm->LoadFloat(dst.fp(), src);
+ break;
+ case ValueType::kF64:
+ assm->LoadDouble(dst.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueType type) {
+ MemOperand dst(base, offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->Usw(src.gp(), dst);
+ break;
+ case ValueType::kI64:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
+ assm->Usd(src.gp(), dst);
+ break;
+ case ValueType::kF32:
+ assm->UStoreFloat(src.fp(), dst);
+ break;
+ case ValueType::kF64:
+ assm->UStoreDouble(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kI32:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->Sw(reg.gp(), MemOperand(sp, 0));
+ break;
+ case ValueType::kI64:
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ case ValueType::kRtt:
+ assm->push(reg.gp());
+ break;
+ case ValueType::kF32:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->StoreFloat(reg.fp(), MemOperand(sp, 0));
+ break;
+ case ValueType::kF64:
+ assm->addi(sp, sp, -kSystemPointerSize);
+ assm->StoreDouble(reg.fp(), MemOperand(sp, 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst,
+ LoadType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = dst;
+ switch (type.value()) {
+ case LoadType::kI64Load8U:
+ case LoadType::kI64Load8S:
+ case LoadType::kI32Load8U:
+ case LoadType::kI32Load8S:
+ // No need to change endianness for byte size.
+ return;
+ case LoadType::kF32Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load32U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case LoadType::kF64Load:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst);
+ V8_FALLTHROUGH;
+ case LoadType::kI64Load:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case LoadType::kF32Load:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp);
+ break;
+ case LoadType::kF64Load:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bool is_float = false;
+ LiftoffRegister tmp = src;
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ // No need to change endianness for byte size.
+ return;
+ case StoreType::kF32Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI32Store:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kI32Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ case StoreType::kF64Store:
+ is_float = true;
+ tmp = assm->GetUnusedRegister(kGpReg, pinned);
+ assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src);
+ V8_FALLTHROUGH;
+ case StoreType::kI64Store:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8);
+ break;
+ case StoreType::kI64Store32:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4);
+ break;
+ case StoreType::kI64Store16:
+ assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_float) {
+ switch (type.value()) {
+ case StoreType::kF32Store:
+ assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp);
+ break;
+ case StoreType::kF64Store:
+ assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+#endif // V8_TARGET_BIG_ENDIAN
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+  // When the constant that represents the size of the stack frame can't be
+  // represented as a 16-bit immediate, we need three instructions to add it
+  // to sp, so we reserve space for this case.
+ Add64(sp, sp, Operand(0L));
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld(scratch, MemOperand(sp, i * 8));
+ Sd(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ Add64(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
+ int frame_size = GetTotalFrameSize() - kSystemPointerSize;
+  // We can't run out of space; just pass anything big enough not to cause
+  // the assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+  // If the frame size can be represented as a 16-bit immediate, an addi will
+  // be generated and the two nops will stay untouched. Otherwise, a lui-ori
+  // sequence will load it into a register and, as a third instruction, a
+  // daddu will be generated.
+ patching_assembler.Add64(sp, sp, Operand(-frame_size));
+}
+
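PrepareStackFrame and PatchPrepareStackFrame cooperate: three instruction slots (Add64 plus two nops) are reserved before the frame size is known, and the real sp adjustment is later assembled directly over that reserved region. A minimal sketch of the reserve-then-patch idea over a plain byte buffer (names here are illustrative, not V8 API):

  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Reserve a fixed-size placeholder in the code buffer and return its offset.
  size_t ReservePlaceholder(std::vector<uint8_t>* code, size_t bytes) {
    size_t offset = code->size();
    code->insert(code->end(), bytes, 0);  // stands in for Add64 + two nops
    return offset;
  }

  // Once the final frame size is known, overwrite the placeholder in place.
  void PatchPlaceholder(std::vector<uint8_t>* code, size_t offset,
                        int32_t frame_size) {
    std::memcpy(code->data() + offset, &frame_size, sizeof frame_size);
  }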
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return type.element_size_bytes();
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueType type) {
+ switch (type.kind()) {
+ case ValueType::kS128:
+ return true;
+ default:
+ // No alignment because all other types are kStackSlotSize.
+ return false;
+ }
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case ValueType::kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case ValueType::kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case ValueType::kF32:
+ TurboAssembler::LoadFPRImmediate(reg.fp(),
+ value.to_f32_boxed().get_bits());
+ break;
+ case ValueType::kF64:
+ TurboAssembler::LoadFPRImmediate(reg.fp(),
+ value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset,
+ int size) {
+ DCHECK_LE(0, offset);
+ Ld(dst, liftoff::GetInstanceOperand());
+ DCHECK(size == 4 || size == 8);
+ if (size == 4) {
+ Lw(dst, MemOperand(dst, offset));
+ } else {
+ Ld(dst, MemOperand(dst, offset));
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ int32_t offset) {
+ LoadFromInstance(dst, offset, kTaggedSize);
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ Sd(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+ Sd(src.gp(), dst_op);
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ Branch(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
+ Add64(scratch, dst_addr, offset_imm);
+ CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
+}
+
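The tagged store above is followed by the usual generational write-barrier filter: the RecordWrite stub is reached only if the destination page may hold interesting outgoing pointers, the stored value is not a Smi, and the value's page is an interesting target. A schematic version of that decision chain (booleans standing in for the real MemoryChunk flag tests; illustrative only):

  // Purely illustrative condition; the real code tests page flags and Smi tags.
  bool NeedsWriteBarrier(bool dst_page_pointers_from_here_interesting,
                         bool value_is_smi,
                         bool value_page_pointers_to_here_interesting) {
    if (!dst_page_pointers_from_here_interesting) return false;
    if (value_is_smi) return false;
    if (!value_page_pointers_to_here_interesting) return false;
    return true;  // fall through to CallRecordWriteStub
  }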
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Lbu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Lb(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ulhu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ulh(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ulwu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ulw(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Uld(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (is_load_mem) {
+ pinned.set(src_op.rm());
+ liftoff::ChangeEndiannessLoad(this, dst, type, pinned);
+ }
+#endif
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+#if defined(V8_TARGET_BIG_ENDIAN)
+ if (is_store_mem) {
+ pinned.set(dst_op.rm());
+ LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned);
+ // Save original value.
+ Move(tmp, src, type.value_type());
+
+ src = tmp;
+ pinned.set(tmp);
+ liftoff::ChangeEndiannessStore(this, src, type, pinned);
+ }
+#endif
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ Sb(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::Ush(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::Usw(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::Usd(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
+void LiftoffAssembler::AtomicFence() { sync(); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, type);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueType type) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, type);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueType type) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), type);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueType type) {
+ DCHECK_NE(dst_offset, src_offset);
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
+ Fill(reg, src_offset, type);
+ Spill(dst_offset, reg, type);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueType type) {
+ DCHECK_NE(dst, src);
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ Sw(reg.gp(), dst);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ case ValueType::kRtt:
+ case ValueType::kRttWithDepth:
+ Sd(reg.gp(), dst);
+ break;
+ case ValueType::kF32:
+ StoreFloat(reg.fp(), dst);
+ break;
+ case ValueType::kF64:
+ TurboAssembler::StoreDouble(reg.fp(), dst);
+ break;
+ case ValueType::kS128:
+ bailout(kSimd, "Spill S128");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case ValueType::kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ Sw(tmp.gp(), dst);
+ break;
+ }
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ Sd(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (type.kind()) {
+ case ValueType::kI32:
+ Lw(reg.gp(), src);
+ break;
+ case ValueType::kI64:
+ case ValueType::kRef:
+ case ValueType::kOptRef:
+ Ld(reg.gp(), src);
+ break;
+ case ValueType::kF32:
+ LoadFloat(reg.fp(), src);
+ break;
+ case ValueType::kF64:
+ TurboAssembler::LoadDouble(reg.fp(), src);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ Sd(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ Sw(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add64(a0, fp, Operand(-start - size));
+ Add64(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ Sd(zero_reg, MemOperand(a0));
+ addi(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz64(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz64(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne);
+ TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne);
+ add(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div32(dst, lhs, rhs);
+}
+
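emit_i32_divs guards the two cases that must trap: division by zero and kMinInt / -1, whose mathematical result 2^31 is not representable in 32 bits. A host-side illustration of the same guard (not part of the patch):

  #include <cstdint>

  // Returns false instead of dividing when the operation would trap,
  // mirroring the branches emitted above.
  bool SafeI32Div(int32_t lhs, int32_t rhs, int32_t* out) {
    if (rhs == 0) return false;                       // trap_div_by_zero
    if (lhs == INT32_MIN && rhs == -1) return false;  // trap_div_unrepresentable
    *out = lhs / rhs;
    return true;
  }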
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Divu32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod32(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Modu32(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, addw)
+I32_BINOP(sub, subw)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add32)
+I32_BINOP_I(sub, Sub32)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz32(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz32(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt32(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction(dst, src, amount & 31); \
+ }
+
+I32_SHIFTOP(shl, sllw)
+I32_SHIFTOP(sar, sraw)
+I32_SHIFTOP(shr, srlw)
+
+I32_SHIFTOP_I(shl, slliw)
+I32_SHIFTOP_I(sar, sraiw)
+I32_SHIFTOP_I(shr, srliw)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::CompareI(kScratchReg, lhs.gp(),
+ Operand(std::numeric_limits<int64_t>::min()), ne);
+ TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne);
+ add(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, add)
+I64_BINOP(sub, sub)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ DCHECK(is_uint6(amount)); \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+
+I64_SHIFTOP(shl, sll)
+I64_SHIFTOP(sar, sra)
+I64_SHIFTOP(shr, srl)
+
+I64_SHIFTOP_I(shl, slli)
+I64_SHIFTOP_I(sar, srai)
+I64_SHIFTOP_I(shr, srli)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm));
+}
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
+ addw(dst, src, zero_reg);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float32Min(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float32Max(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float64Min(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ TurboAssembler::Float64Max(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src, kScratchDoubleReg); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+      // According to the WebAssembly spec, if the I64 value does not fit into
+      // the I32 range, the value is undefined. Therefore, we use sign
+      // extension to implement the I64-to-I32 truncation.
+ TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI32SConvertF32:
+ case kExprI32UConvertF32:
+ case kExprI32SConvertF64:
+ case kExprI32UConvertF64:
+ case kExprI64SConvertF32:
+ case kExprI64UConvertF32:
+ case kExprI64SConvertF64:
+ case kExprI64UConvertF64:
+ case kExprF32ConvertF64: {
+      // Real conversion; if src is out of bounds of the target integer type,
+      // kScratchReg is set to 0.
+ switch (opcode) {
+ case kExprI32SConvertF32:
+ Trunc_w_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32UConvertF32:
+ Trunc_uw_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32SConvertF64:
+ Trunc_w_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI32UConvertF64:
+ Trunc_uw_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64SConvertF32:
+ Trunc_l_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64UConvertF32:
+ Trunc_ul_s(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64SConvertF64:
+ Trunc_l_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprI64UConvertF64:
+ Trunc_ul_d(dst.gp(), src.fp(), kScratchReg);
+ break;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+      // Branch to the trap label if the conversion was out of range
+      // (kScratchReg == 0).
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
+ TurboAssembler::SignExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::ZeroExtendWord(dst.gp(), src.gp());
+ return true;
+ case kExprI64ReinterpretF64:
+ fmv_x_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ TurboAssembler::Cvt_s_w(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Cvt_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ReinterpretI32:
+ fmv_w_x(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ TurboAssembler::Cvt_d_w(dst.fp(), src.gp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Cvt_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ fmv_d_x(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ slliw(dst, src, 32 - 8);
+ sraiw(dst, dst, 32 - 8);
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ slliw(dst, src, 32 - 16);
+ sraiw(dst, dst, 32 - 16);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 8);
+ srai(dst.gp(), dst.gp(), 64 - 8);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 16);
+ srai(dst.gp(), dst.gp(), 64 - 16);
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ slli(dst.gp(), src.gp(), 64 - 32);
+ srai(dst.gp(), dst.gp(), 64 - 32);
+}
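+// The sign-extension helpers above all use the same pattern: shift the narrow
+// value to the top of the register, then shift it back down arithmetically so
+// the sign bit is replicated. For example, emit_i32_signextend_i8 with
+// src = 0x0000'0080:
+//   slliw dst, src, 24   // dst = 0x8000'0000
+//   sraiw dst, dst, 24   // dst = 0xFFFF'FF80 (bit 7 copied into bits 8..31)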
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueType type,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(type == kWasmI32 || type == kWasmI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((type == kWasmI32 || type == kWasmI64) ||
+ (type.is_reference_type() &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ TurboAssembler::Sltu(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ TurboAssembler::Sltu(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond);
+}
+
+static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return EQ;
+ case kUnequal:
+ return NE;
+ case kUnsignedLessThan:
+ return LT;
+ case kUnsignedGreaterEqual:
+ return GE;
+ case kUnsignedLessEqual:
+ return LE;
+ case kUnsignedGreaterThan:
+ return GT;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
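+// Floating-point comparisons only reach this point as equality or as one of
+// the "unsigned" Liftoff conditions, which is why the switch above handles no
+// other cases; e.g. kUnsignedLessThan maps to the FPU LT condition.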
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
+ TurboAssembler::CompareF32(dst, fcond, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond);
+ TurboAssembler::CompareF64(dst, fcond, lhs, rhs);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueType type) {
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
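+// Smis are tagged with a zero low bit, so (obj & kSmiTagMask) == 0 exactly
+// when {obj} is a Smi; kJumpOnSmi therefore branches on eq against zero and
+// kJumpOnNotSmi on ne.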
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "StoreLane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_mul");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Uld(limit_address, MemOperand(limit_address));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
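+// {limit_address} initially holds the address of the stack limit; it is
+// overwritten with the limit itself and then compared (unsigned) against
+// {sp}.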
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (emit_debug_code()) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ int32_t num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ int32_t offset = num_gp_regs * kSystemPointerSize;
+ Add64(sp, sp, Operand(-offset));
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ Sd(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ int32_t num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ Add64(sp, sp, Operand(-(num_fp_regs * kStackSlotSize)));
+ int32_t offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += sizeof(double);
+ }
+ DCHECK_EQ(offset, num_fp_regs * sizeof(double));
+ }
+}
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ int32_t fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += sizeof(double);
+ }
+ if (fp_offset) Add64(sp, sp, Operand(fp_offset));
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ int32_t gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ Add64(sp, sp, Operand(gp_offset));
+}
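+// For the GP registers, PushRegisters stores GetFirstRegSet at descending
+// offsets while PopRegisters loads GetLastRegSet at ascending offsets, so
+// each register is reloaded from the slot it was spilled to; the FP registers
+// are saved and restored in the same order at ascending offsets.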
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
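+// Every spilled register consumes one slot of out-of-line spill space, but
+// only the reference spills are recorded as tagged slots in the safepoint.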
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots));
+}
+
+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueType out_argument_type, int stack_bytes,
+ ExternalReference ext_ref) {
+ Add64(sp, sp, Operand(-stack_bytes));
+
+ int arg_bytes = 0;
+ for (ValueType param_type : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_type);
+ arg_bytes += param_type.element_size_bytes();
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On RISC-V, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mv(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_type != kWasmStmt) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_type);
+ }
+
+ Add64(sp, sp, Operand(stack_bytes));
+}
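+// Note the dual return convention above: a scalar C return value arrives in
+// {a0}, while an out-argument (if any) is written by the callee into the
+// argument buffer and read back from offset 0 after the call.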
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ Add64(sp, sp, Operand(-size));
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ Add64(sp, sp, Operand(size));
+}
+
+void LiftoffStackSlots::Construct() {
+ for (auto& slot : slots_) {
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->push(kScratchReg);
+ break;
+ case LiftoffAssembler::VarState::kRegister:
+ liftoff::push(asm_, src.reg(), src.type());
+ break;
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 1161595705..7bb58877dc 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -92,7 +92,6 @@ inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
- bailout(kUnsupportedArchitecture, "PrepareStackFrame");
int offset = pc_offset();
lay(sp, MemOperand(sp));
return offset;
@@ -135,67 +134,279 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- switch (type.kind()) {
- case ValueType::kS128:
- return type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
default:
return kStackSlotSize;
}
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return (type.kind() == ValueType::kS128 || type.is_reference_type());
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return (kind == kS128 || is_reference_type(kind));
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- bailout(kUnsupportedArchitecture, "LoadConstant");
+ switch (value.type().kind()) {
+ case kI32:
+ mov(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ mov(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadF32(reg.fp(), value.to_f32_boxed().get_scalar(), scratch);
+ break;
+ }
+ case kF64: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadF64(reg.fp(), value.to_f64_boxed().get_bits(), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- bailout(kUnsupportedArchitecture, "LoadFromInstance");
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ LoadU64(dst, liftoff::GetInstanceOperand());
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointerFromInstance");
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ LoadU8(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ LoadU32(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ LoadU64(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
+ DCHECK_LE(0, offset);
+ LoadTaggedPointerField(dst, MemOperand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
- bailout(kUnsupportedArchitecture, "SpillInstance");
+ StoreU64(instance, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::FillInstanceInto(Register dst) {
- bailout(kUnsupportedArchitecture, "FillInstanceInto");
+ LoadU64(dst, liftoff::GetInstanceOperand());
}
void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegList pinned) {
- bailout(kUnsupportedArchitecture, "LoadTaggedPointer");
+ CHECK(is_int20(offset_imm));
+ LoadTaggedPointerField(
+ dst,
+ MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
}
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
- bailout(kRefTypes, "GlobalSet");
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ MemOperand dst_op =
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ StoreTaggedField(src.gp(), dst_op);
+
+ if (skip_write_barrier) return;
+
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, r1, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ if (COMPRESS_POINTERS_BOOL) {
+ DecompressTaggedPointer(src.gp(), src.gp());
+ }
+ CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
+ eq, &exit);
+ lay(r1, dst_op);
+ CallRecordWriteStub(dst_addr, r1, EMIT_REMEMBERED_SET, kSaveFPRegs,
+ wasm::WasmCode::kRecordWrite);
+ bind(&exit);
}
void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned,
uint32_t* protected_load_pc, bool is_load_mem) {
- bailout(kUnsupportedArchitecture, "Load");
+ UseScratchRegisterScope temps(this);
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand src_op =
+ MemOperand(src_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ if (protected_load_pc) *protected_load_pc = pc_offset();
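+  // Wasm memory is little-endian while s390 is big-endian, so memory accesses
+  // ({is_load_mem}) use the byte-reversing *LE variants; other callers get
+  // native-endian loads.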
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ LoadU8(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ LoadS8(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ if (is_load_mem) {
+ LoadU16LE(dst.gp(), src_op);
+ } else {
+ LoadU16(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ if (is_load_mem) {
+ LoadS16LE(dst.gp(), src_op);
+ } else {
+ LoadS16(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI64Load32U:
+ if (is_load_mem) {
+ LoadU32LE(dst.gp(), src_op);
+ } else {
+ LoadU32(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ if (is_load_mem) {
+ LoadS32LE(dst.gp(), src_op);
+ } else {
+ LoadS32(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kI64Load:
+ if (is_load_mem) {
+ LoadU64LE(dst.gp(), src_op);
+ } else {
+ LoadU64(dst.gp(), src_op);
+ }
+ break;
+ case LoadType::kF32Load:
+ if (is_load_mem) {
+ LoadF32LE(dst.fp(), src_op, r0);
+ } else {
+ LoadF32(dst.fp(), src_op);
+ }
+ break;
+ case LoadType::kF64Load:
+ if (is_load_mem) {
+ LoadF64LE(dst.fp(), src_op, r0);
+ } else {
+ LoadF64(dst.fp(), src_op);
+ }
+ break;
+ case LoadType::kS128Load:
+ if (is_load_mem) {
+ LoadV128LE(dst.fp(), src_op, r0, r1);
+ } else {
+ LoadV128(dst.fp(), src_op, r0);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned,
uint32_t* protected_store_pc, bool is_store_mem) {
- bailout(kUnsupportedArchitecture, "Store");
+ if (!is_int20(offset_imm)) {
+ mov(ip, Operand(offset_imm));
+ if (offset_reg != no_reg) {
+ AddS64(ip, offset_reg);
+ }
+ offset_reg = ip;
+ offset_imm = 0;
+ }
+ MemOperand dst_op =
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ StoreU8(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ if (is_store_mem) {
+ StoreU16LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU16(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ if (is_store_mem) {
+ StoreU32LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU32(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kI64Store:
+ if (is_store_mem) {
+ StoreU64LE(src.gp(), dst_op, r1);
+ } else {
+ StoreU64(src.gp(), dst_op, r1);
+ }
+ break;
+ case StoreType::kF32Store:
+ if (is_store_mem) {
+ StoreF32LE(src.fp(), dst_op, r1);
+ } else {
+ StoreF32(src.fp(), dst_op);
+ }
+ break;
+ case StoreType::kF64Store:
+ if (is_store_mem) {
+ StoreF64LE(src.fp(), dst_op, r1);
+ } else {
+ StoreF64(src.fp(), dst_op);
+ }
+ break;
+ case StoreType::kS128Store: {
+ if (is_store_mem) {
+ StoreV128LE(src.fp(), dst_op, r0, r1);
+ } else {
+ StoreV128(src.fp(), dst_op, r1);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
@@ -258,53 +469,274 @@ void LiftoffAssembler::AtomicFence() { bailout(kAtomics, "AtomicFence"); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "LoadCallerFrameSlot");
+ ValueKind kind) {
+ int32_t offset = (caller_slot_idx + 1) * 8;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(fp, offset + 4));
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(fp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(dst.fp(), MemOperand(fp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "StoreCallerFrameSlot");
+ ValueKind kind) {
+ int32_t offset = (caller_slot_idx + 1) * 8;
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ StoreU32(src.gp(), MemOperand(fp, offset + 4));
+ break;
+#else
+ StoreU32(src.gp(), MemOperand(fp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ StoreU64(src.gp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF32: {
+ StoreF32(src.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kF64: {
+ StoreF64(src.fp(), MemOperand(fp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ StoreV128(src.fp(), MemOperand(fp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "LoadReturnStackSlot");
+ ValueKind kind) {
+ switch (kind) {
+ case kI32: {
+#if defined(V8_TARGET_BIG_ENDIAN)
+ LoadS32(dst.gp(), MemOperand(sp, offset + 4));
+ break;
+#else
+ LoadS32(dst.gp(), MemOperand(sp, offset));
+ break;
+#endif
+ }
+ case kRef:
+ case kRtt:
+ case kOptRef:
+ case kI64: {
+ LoadU64(dst.gp(), MemOperand(sp, offset));
+ break;
+ }
+ case kF32: {
+ LoadF32(dst.fp(), MemOperand(sp, offset));
+ break;
+ }
+ case kF64: {
+ LoadF64(dst.fp(), MemOperand(sp, offset));
+ break;
+ }
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(dst.fp(), MemOperand(sp, offset), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "MoveStackValue");
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
+ int length = 0;
+ switch (kind) {
+ case kI32:
+ case kF32:
+ length = 4;
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kF64:
+ length = 8;
+ break;
+ case kS128:
+ length = 16;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (is_int20(dst_offset)) {
+ lay(ip, liftoff::GetStackSlot(dst_offset));
+ } else {
+ mov(ip, Operand(-dst_offset));
+ lay(ip, MemOperand(fp, ip));
+ }
+
+ if (is_int20(src_offset)) {
+ lay(r1, liftoff::GetStackSlot(src_offset));
+ } else {
+ mov(r1, Operand(-src_offset));
+ lay(r1, MemOperand(fp, r1));
+ }
+
+ MoveChar(MemOperand(ip), MemOperand(r1), Operand(length));
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
- bailout(kUnsupportedArchitecture, "Move Register");
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ mov(dst, src);
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
- bailout(kUnsupportedArchitecture, "Move DoubleRegister");
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind == kF32) {
+ ler(dst, src);
+ } else if (kind == kF64) {
+ ldr(dst, src);
+ } else {
+ DCHECK_EQ(kS128, kind);
+ vlr(dst, src, Condition(0), Condition(0), Condition(0));
+ }
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
- bailout(kUnsupportedArchitecture, "Spill register");
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ DCHECK_LT(0, offset);
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ StoreU32(reg.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ StoreU64(reg.gp(), dst);
+ break;
+ case kF32:
+ StoreF32(reg.fp(), dst);
+ break;
+ case kF64:
+ StoreF64(reg.fp(), dst);
+ break;
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ StoreV128(reg.fp(), dst, scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::Spill(int offset, WasmValue value) {
- bailout(kUnsupportedArchitecture, "Spill value");
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ UseScratchRegisterScope temps(this);
+ Register src = no_reg;
+ if (!is_uint12(abs(dst.offset()))) {
+ src = GetUnusedRegister(kGpReg, {}).gp();
+ } else {
+ src = temps.Acquire();
+ }
+ switch (value.type().kind()) {
+ case kI32: {
+ mov(src, Operand(value.to_i32()));
+ StoreU32(src, dst);
+ break;
+ }
+ case kI64: {
+ mov(src, Operand(value.to_i64()));
+ StoreU64(src, dst);
+ break;
+ }
+ default:
+ // We do not track f32 and f64 constants, hence they are unreachable.
+ UNREACHABLE();
+ }
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- bailout(kUnsupportedArchitecture, "Fill");
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ LoadS32(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ LoadU64(reg.gp(), src);
+ break;
+ case kF32:
+ LoadF32(reg.fp(), src);
+ break;
+ case kF64:
+ LoadF64(reg.fp(), src);
+ break;
+ case kS128: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadV128(reg.fp(), src, scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
- bailout(kUnsupportedArchitecture, "FillI64Half");
+ UNREACHABLE();
}
void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
DCHECK_LT(0, size);
+ DCHECK_EQ(0, size % 4);
RecordUsedSpillOffset(start + size);
// We need a zero reg. Always use r0 for that, and push it before to restore
@@ -328,16 +760,16 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
// Use r3 for start address (inclusive), r4 for end address (exclusive).
push(r3);
push(r4);
- SubS64(r3, fp, Operand(start + size));
- SubS64(r4, fp, Operand(start));
+
+ lay(r3, MemOperand(fp, -start - size));
+ lay(r4, MemOperand(fp, -start));
Label loop;
bind(&loop);
- StoreU64(r0, MemOperand(r0));
- la(r0, MemOperand(r0, kSystemPointerSize));
+ StoreU64(r0, MemOperand(r3));
+ lay(r3, MemOperand(r3, kSystemPointerSize));
CmpU64(r3, r4);
bne(&loop);
-
pop(r4);
pop(r3);
}
@@ -345,122 +777,133 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
pop(r0);
}
-#define UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register lhs, \
- Register rhs) { \
- bailout(kUnsupportedArchitecture, "i32 binop: " #name); \
- }
-#define UNIMPLEMENTED_I32_BINOP_I(name) \
- UNIMPLEMENTED_I32_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(Register dst, Register lhs, \
- int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i32 binop_i: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
- LiftoffRegister rhs) { \
- bailout(kUnsupportedArchitecture, "i64 binop: " #name); \
- }
-#define UNIMPLEMENTED_I64_BINOP_I(name) \
- UNIMPLEMENTED_I64_BINOP(name) \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister lhs, int32_t imm) { \
- bailout(kUnsupportedArchitecture, "i64 binop_i: " #name); \
- }
-#define UNIMPLEMENTED_GP_UNOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src) { \
- bailout(kUnsupportedArchitecture, "gp unop: " #name); \
- }
-#define UNIMPLEMENTED_FP_BINOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
- DoubleRegister rhs) { \
- bailout(kUnsupportedArchitecture, "fp binop: " #name); \
- }
-#define UNIMPLEMENTED_FP_UNOP(name) \
- void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
+#define SIGN_EXT(r) lgfr(r, r)
+#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define REGISTER_AND_WITH_1F \
+ ([&](Register rhs) { \
+ AndP(r1, rhs, Operand(31)); \
+ return r1; \
+ })
+
+#define LFR_TO_REG(reg) reg.gp()
+
+// V(name, instr, dtype, stype, dcast, scast, rcast)
+#define UNOP_LIST(V) \
+ V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE) \
+ V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE) \
+ V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, USE) \
+ V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE) \
+ V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE)
+
+#define EMIT_UNOP_FUNCTION(name, instr, dtype, stype, dcast, scast, rcast) \
+ void LiftoffAssembler::emit_##name(dtype dst, stype src) { \
+ auto _dst = dcast(dst); \
+ auto _src = scast(src); \
+ instr(_dst, _src); \
+ rcast(_dst); \
}
-#define UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(name) \
- bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
- bailout(kUnsupportedArchitecture, "fp unop: " #name); \
- return true; \
- }
-#define UNIMPLEMENTED_I32_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(Register dst, Register src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(Register dst, Register src, \
- int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i32 shiftop: " #name); \
- }
-#define UNIMPLEMENTED_I64_SHIFTOP(name) \
- void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister src, \
- Register amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
- } \
- void LiftoffAssembler::emit_##name##i(LiftoffRegister dst, \
- LiftoffRegister src, int32_t amount) { \
- bailout(kUnsupportedArchitecture, "i64 shiftop: " #name); \
+UNOP_LIST(EMIT_UNOP_FUNCTION)
+#undef EMIT_UNOP_FUNCTION
+#undef UNOP_LIST
+
+// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast)
+#define BINOP_LIST(V) \
+ V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_mul, MulF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f64_div, DivF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_add, AddF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_sub, SubF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_mul, MulF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(f32_div, DivF32, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE) \
+ V(i32_shli, ShiftLeftU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_sari, ShiftRightS32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shri, ShiftRightU32, Register, Register, int32_t, , , \
+ INT32_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shl, ShiftLeftU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_sar, ShiftRightS32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
+ REGISTER_AND_WITH_1F, SIGN_EXT) \
+ V(i32_addi, AddS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_subi, SubS32, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_andi, And, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_ori, Or, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_xori, Xor, Register, Register, int32_t, , , Operand, SIGN_EXT) \
+ V(i32_add, AddS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_sub, SubS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_and, And, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_or, Or, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_xor, Xor, Register, Register, Register, , , , SIGN_EXT) \
+ V(i32_mul, MulS32, Register, Register, Register, , , , SIGN_EXT) \
+ V(i64_add, AddS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_sub, SubS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_mul, MulS64, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_and, AndP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_or, OrP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_xor, XorP, LiftoffRegister, LiftoffRegister, LiftoffRegister, \
+ LFR_TO_REG, LFR_TO_REG, LFR_TO_REG, USE) \
+ V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
+ LFR_TO_REG, LFR_TO_REG, , USE) \
+ V(i64_addi, AddS64, LiftoffRegister, LiftoffRegister, int64_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_andi, AndP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_ori, OrP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_xori, XorP, LiftoffRegister, LiftoffRegister, int32_t, LFR_TO_REG, \
+ LFR_TO_REG, Operand, USE) \
+ V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE) \
+ V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE) \
+ V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
+ LFR_TO_REG, LFR_TO_REG, Operand, USE)
+
+#define EMIT_BINOP_FUNCTION(name, instr, dtype, stype1, stype2, dcast, scast1, \
+ scast2, rcast) \
+ void LiftoffAssembler::emit_##name(dtype dst, stype1 lhs, stype2 rhs) { \
+ auto _dst = dcast(dst); \
+ auto _lhs = scast1(lhs); \
+ auto _rhs = scast2(rhs); \
+ instr(_dst, _lhs, _rhs); \
+ rcast(_dst); \
}
-UNIMPLEMENTED_I32_BINOP_I(i32_add)
-UNIMPLEMENTED_I32_BINOP_I(i32_sub)
-UNIMPLEMENTED_I32_BINOP(i32_mul)
-UNIMPLEMENTED_I32_BINOP_I(i32_and)
-UNIMPLEMENTED_I32_BINOP_I(i32_or)
-UNIMPLEMENTED_I32_BINOP_I(i32_xor)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shl)
-UNIMPLEMENTED_I32_SHIFTOP(i32_sar)
-UNIMPLEMENTED_I32_SHIFTOP(i32_shr)
-UNIMPLEMENTED_I64_BINOP(i64_add)
-UNIMPLEMENTED_I64_BINOP(i64_sub)
-UNIMPLEMENTED_I64_BINOP(i64_mul)
-#ifdef V8_TARGET_ARCH_S390X
-UNIMPLEMENTED_I64_BINOP_I(i64_and)
-UNIMPLEMENTED_I64_BINOP_I(i64_or)
-UNIMPLEMENTED_I64_BINOP_I(i64_xor)
-#endif
-UNIMPLEMENTED_I64_SHIFTOP(i64_shl)
-UNIMPLEMENTED_I64_SHIFTOP(i64_sar)
-UNIMPLEMENTED_I64_SHIFTOP(i64_shr)
-UNIMPLEMENTED_GP_UNOP(i32_clz)
-UNIMPLEMENTED_GP_UNOP(i32_ctz)
-UNIMPLEMENTED_FP_BINOP(f32_add)
-UNIMPLEMENTED_FP_BINOP(f32_sub)
-UNIMPLEMENTED_FP_BINOP(f32_mul)
-UNIMPLEMENTED_FP_BINOP(f32_div)
-UNIMPLEMENTED_FP_BINOP(f32_copysign)
-UNIMPLEMENTED_FP_UNOP(f32_abs)
-UNIMPLEMENTED_FP_UNOP(f32_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f32_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f32_sqrt)
-UNIMPLEMENTED_FP_BINOP(f64_add)
-UNIMPLEMENTED_FP_BINOP(f64_sub)
-UNIMPLEMENTED_FP_BINOP(f64_mul)
-UNIMPLEMENTED_FP_BINOP(f64_div)
-UNIMPLEMENTED_FP_BINOP(f64_copysign)
-UNIMPLEMENTED_FP_UNOP(f64_abs)
-UNIMPLEMENTED_FP_UNOP(f64_neg)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_ceil)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_floor)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_trunc)
-UNIMPLEMENTED_FP_UNOP_RETURN_TRUE(f64_nearest_int)
-UNIMPLEMENTED_FP_UNOP(f64_sqrt)
-
-#undef UNIMPLEMENTED_I32_BINOP
-#undef UNIMPLEMENTED_I32_BINOP_I
-#undef UNIMPLEMENTED_I64_BINOP
-#undef UNIMPLEMENTED_I64_BINOP_I
-#undef UNIMPLEMENTED_GP_UNOP
-#undef UNIMPLEMENTED_FP_BINOP
-#undef UNIMPLEMENTED_FP_UNOP
-#undef UNIMPLEMENTED_FP_UNOP_RETURN_TRUE
-#undef UNIMPLEMENTED_I32_SHIFTOP
-#undef UNIMPLEMENTED_I64_SHIFTOP
+BINOP_LIST(EMIT_BINOP_FUNCTION)
+#undef BINOP_LIST
+#undef EMIT_BINOP_FUNCTION
+#undef SIGN_EXT
+#undef INT32_AND_WITH_1F
+#undef REGISTER_AND_WITH_1F
+#undef LFR_TO_REG
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
bailout(kUnsupportedArchitecture, "i32_popcnt");
@@ -469,13 +912,29 @@ bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_popcnt");
+ Popcnt64(dst.gp(), src.gp());
return true;
}
-void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
- int64_t imm) {
- bailout(kUnsupportedArchitecture, "i64_addi");
+bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_POS_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_NEG_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_0, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+ return true;
}
void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
@@ -496,6 +955,27 @@ void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
FloatMin(dst, lhs, rhs);
}
+bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_POS_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_NEG_INF, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_0, dst, src);
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+ return true;
+}
+
void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
@@ -517,61 +997,137 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ ltr(r0, rhs);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1));
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt));
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont;
+ Label done;
+ Label trap_div_unrepresentable;
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+
+ // Check kMinInt/-1 case.
+ CmpS32(rhs, Operand(-1));
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt));
+ beq(&trap_div_unrepresentable);
+
+ // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+ // kMinInt / -1 case: the result is 0.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ // Check for division by zero.
+ ltr(r0, rhs);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ // Use r0 as a scratch register for the zero and kMinInt / -1 checks.
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+ Label cont;
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1));
+ bne(&cont);
+ mov(r0, Operand(kMinInt64));
+ CmpS64(lhs.gp(), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ ltgr(r0, rhs.gp());
+ b(eq, trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(1) << 63;
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1));
+ bne(&cont);
+ mov(r0, Operand(kMinInt64));
+ CmpS64(lhs.gp(), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ // Check for division by zero.
+ ltgr(r0, rhs.gp());
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
-void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_clz");
-}
-
-void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "i64_ctz");
-}
-
void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
bailout(kUnsupportedArchitecture, "emit_u32_to_intptr");
@@ -615,41 +1171,42 @@ void LiftoffAssembler::emit_jump(Label* label) {
}
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
bool use_signed = liftoff::UseSignedOp(liftoff_cond);
- if (type.kind() == ValueType::kI32) {
- if (rhs == no_reg) {
- if (use_signed) {
- CmpS32(lhs, Operand::Zero());
- } else {
- CmpU32(lhs, Operand::Zero());
- }
- } else {
- if (use_signed) {
- CmpS32(lhs, rhs);
- } else {
- CmpU32(lhs, rhs);
- }
+ if (rhs != no_reg) {
+ switch (kind) {
+ case kI32:
+ if (use_signed) {
+ CmpS32(lhs, rhs);
+ } else {
+ CmpU32(lhs, rhs);
+ }
+ break;
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
+ case kI64:
+ if (use_signed) {
+ CmpS64(lhs, rhs);
+ } else {
+ CmpU64(lhs, rhs);
+ }
+ break;
+ default:
+ UNREACHABLE();
}
} else {
- CHECK_EQ(type.kind(), ValueType::kI64);
- if (rhs == no_reg) {
- if (use_signed) {
- CmpS64(lhs, Operand::Zero());
- } else {
- CmpU64(lhs, Operand::Zero());
- }
- } else {
- if (use_signed) {
- CmpS64(lhs, rhs);
- } else {
- CmpU64(lhs, rhs);
- }
- }
+ DCHECK_EQ(kind, kI32);
+ CHECK(use_signed);
+ CmpS32(lhs, Operand::Zero());
}
+
b(cond, label);
}
@@ -719,7 +1276,7 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
+ ValueKind kind) {
return false;
}
@@ -743,6 +1300,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
bailout(kSimd, "loadlane");
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "store lane");
+}
+
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -846,6 +1410,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "pmax unimplemented");
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f64x2.promote_low_f32x4");
+}
+
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f32x4_splat");
@@ -966,6 +1545,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64x2neg");
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v64x2_alltrue");
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i64x2_shl");
@@ -1036,6 +1620,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
bailout(kSimd, "i64x2_bitmask");
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2_uconvert_i32x4_high");
+}
+
void LiftoffAssembler::emit_i64x2_extmul_high_i32x4_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1065,11 +1669,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v32x4_anytrue");
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v32x4_alltrue");
@@ -1157,6 +1756,16 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
bailout(kSimd, "i32x4_dot_i16x8_s");
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.extadd_pairwise_i16x8_u");
+}
+
void LiftoffAssembler::emit_i32x4_extmul_low_i16x8_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1191,11 +1800,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- bailout(kSimd, "v16x8_anytrue");
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "v16x8_alltrue");
@@ -1314,6 +1918,16 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8replacelane");
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8.extadd_pairwise_i8x16_u");
+}
+
void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1338,6 +1952,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_s(LiftoffRegister dst,
bailout(kSimd, "i16x8.extmul_high_i8x16_s unsupported");
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "i16x8_q15mulr_sat_s");
+}
+
void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -1352,6 +1972,11 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
bailout(kSimd, "i8x16_shuffle");
}
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16.popcnt");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -1381,8 +2006,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
bailout(kSimd, "v8x16_anytrue");
}
@@ -1581,6 +2206,26 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kUnsupportedArchitecture, "emit_i32x4ge_u");
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2.ge_s");
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_f32x4_eq");
@@ -1672,6 +2317,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
bailout(kSimd, "f32x4_uconvert_i32x4");
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4.demote_f64x2_zero");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1736,6 +2386,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_uconvert_i16x8_high");
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4.trunc_sat_f64x2_u_zero");
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -1769,6 +2429,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4_abs");
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i64x2.abs");
+}
+
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
bailout(kUnsupportedArchitecture, "StackCheck");
}
@@ -1800,10 +2465,10 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
bailout(kUnsupportedArchitecture, "DropStackSlotsAndRet");
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
bailout(kUnsupportedArchitecture, "CallC");
}
@@ -1816,7 +2481,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
bailout(kUnsupportedArchitecture, "CallIndirect");
@@ -1842,6 +2507,16 @@ void LiftoffStackSlots::Construct() {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f64_copysign");
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_f32_copysign");
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index a95ef95f26..92005bdb8f 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -8,6 +8,7 @@
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/machine-type.h"
#include "src/heap/memory-chunk.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/simd-shuffle.h"
@@ -83,24 +84,25 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->movl(dst.gp(), src);
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
assm->movq(dst.gp(), src);
break;
- case ValueType::kF32:
+ case kF32:
assm->Movss(dst.fp(), src);
break;
- case ValueType::kF64:
+ case kF64:
assm->Movsd(dst.fp(), src);
break;
- case ValueType::kS128:
+ case kS128:
assm->Movdqu(dst.fp(), src);
break;
default:
@@ -109,21 +111,21 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Operand src,
}
inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
- ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
assm->movl(dst, src.gp());
break;
- case ValueType::kI64:
+ case kI64:
assm->movq(dst, src.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->Movss(dst, src.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->Movsd(dst, src.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->Movdqu(dst, src.fp());
break;
default:
@@ -131,21 +133,23 @@ inline void Store(LiftoffAssembler* assm, Operand dst, LiftoffRegister src,
}
}
-inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) {
- switch (type.kind()) {
- case ValueType::kI32:
- case ValueType::kI64:
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ case kI64:
+ case kRef:
+ case kOptRef:
assm->pushq(reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movss(Operand(rsp, 0), reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
assm->AllocateStackSpace(kSystemPointerSize);
assm->Movsd(Operand(rsp, 0), reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
assm->AllocateStackSpace(kSystemPointerSize * 2);
assm->Movdqu(Operand(rsp, 0), reg.fp());
break;
@@ -187,7 +191,9 @@ void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
popq(rbp);
}
-void LiftoffAssembler::AlignFrameSize() {}
+void LiftoffAssembler::AlignFrameSize() {
+ max_used_spill_offset_ = RoundUp(max_used_spill_offset_, kSystemPointerSize);
+}
void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
// The frame_size includes the frame marker. The frame marker has already been
@@ -195,7 +201,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
// anymore.
int frame_size = GetTotalFrameSize() - kSystemPointerSize;
// Need to align sp to system pointer size.
- frame_size = RoundUp(frame_size, kSystemPointerSize);
+ DCHECK_EQ(frame_size, RoundUp(frame_size, kSystemPointerSize));
// We can't run out of space, just pass anything big enough to not cause the
// assembler to try to grow the buffer.
constexpr int kAvailableSpace = 64;
@@ -237,36 +243,36 @@ constexpr int LiftoffAssembler::StaticStackFrameSize() {
return liftoff::kInstanceOffset;
}
-int LiftoffAssembler::SlotSizeForType(ValueType type) {
- return type.is_reference_type() ? kSystemPointerSize
- : type.element_size_bytes();
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ return is_reference_type(kind) ? kSystemPointerSize
+ : element_size_bytes(kind);
}
-bool LiftoffAssembler::NeedsAlignment(ValueType type) {
- return type.is_reference_type();
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return is_reference_type(kind);
}
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
if (value.to_i32() == 0 && RelocInfo::IsNone(rmode)) {
xorl(reg.gp(), reg.gp());
} else {
movl(reg.gp(), Immediate(value.to_i32(), rmode));
}
break;
- case ValueType::kI64:
+ case kI64:
if (RelocInfo::IsNone(rmode)) {
TurboAssembler::Set(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
break;
- case ValueType::kF32:
+ case kF32:
TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
break;
- case ValueType::kF64:
+ case kF64:
TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
break;
default:
@@ -274,21 +280,34 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
}
}
-void LiftoffAssembler::LoadFromInstance(Register dst, int offset, int size) {
- DCHECK_LE(0, offset);
- DCHECK(size == 4 || size == 8);
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
movq(dst, liftoff::GetInstanceOperand());
- if (size == 4) {
- movl(dst, Operand(dst, offset));
- } else {
- movq(dst, Operand(dst, offset));
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ Operand src{instance, offset};
+ switch (size) {
+ case 1:
+ movzxbl(dst, src);
+ break;
+ case 4:
+ movl(dst, src);
+ break;
+ case 8:
+ movq(dst, src);
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, int offset) {
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int offset) {
DCHECK_LE(0, offset);
- movq(dst, liftoff::GetInstanceOperand());
- LoadTaggedPointerField(dst, Operand(dst, offset));
+ LoadTaggedPointerField(dst, Operand(instance, offset));
}
void LiftoffAssembler::SpillInstance(Register instance) {
@@ -316,13 +335,16 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
LiftoffRegister src,
- LiftoffRegList pinned) {
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
DCHECK_GE(offset_imm, 0);
- Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Operand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg,
static_cast<uint32_t>(offset_imm));
StoreTaggedField(dst_op, src.gp());
+ if (skip_write_barrier) return;
+
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
Label exit;
CheckPageFlag(dst_addr, scratch,
@@ -756,82 +778,83 @@ void LiftoffAssembler::AtomicFence() { mfence(); }
void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
Operand src(rbp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Load(this, dst, src, type);
+ liftoff::Load(this, dst, src, kind);
}
void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
uint32_t caller_slot_idx,
- ValueType type) {
+ ValueKind kind) {
Operand dst(rbp, kSystemPointerSize * (caller_slot_idx + 1));
- liftoff::Store(this, dst, src, type);
+ liftoff::Store(this, dst, src, kind);
}
void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister reg, int offset,
- ValueType type) {
+ ValueKind kind) {
Operand src(rsp, offset);
- liftoff::Load(this, reg, src, type);
+ liftoff::Load(this, reg, src, kind);
}
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst_offset, src_offset);
Operand dst = liftoff::GetStackSlot(dst_offset);
Operand src = liftoff::GetStackSlot(src_offset);
- if (type.element_size_log2() == 2) {
+ if (element_size_log2(kind) == 2) {
movl(kScratchRegister, src);
movl(dst, kScratchRegister);
} else {
- DCHECK_EQ(3, type.element_size_log2());
+ DCHECK_EQ(3, element_size_log2(kind));
movq(kScratchRegister, src);
movq(dst, kScratchRegister);
}
}
-void LiftoffAssembler::Move(Register dst, Register src, ValueType type) {
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmI32) {
+ if (kind == kI32) {
movl(dst, src);
} else {
- DCHECK(kWasmI64 == type || type.is_reference_type());
+ DCHECK(kI64 == kind || is_reference_type(kind));
movq(dst, src);
}
}
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
- ValueType type) {
+ ValueKind kind) {
DCHECK_NE(dst, src);
- if (type == kWasmF32) {
+ if (kind == kF32) {
Movss(dst, src);
- } else if (type == kWasmF64) {
+ } else if (kind == kF64) {
Movsd(dst, src);
} else {
- DCHECK_EQ(kWasmS128, type);
+ DCHECK_EQ(kS128, kind);
Movapd(dst, src);
}
}
-void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) {
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
movl(dst, reg.gp());
break;
- case ValueType::kI64:
- case ValueType::kOptRef:
- case ValueType::kRef:
- case ValueType::kRtt:
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
movq(dst, reg.gp());
break;
- case ValueType::kF32:
+ case kF32:
Movss(dst, reg.fp());
break;
- case ValueType::kF64:
+ case kF64:
Movsd(dst, reg.fp());
break;
- case ValueType::kS128:
+ case kS128:
Movdqu(dst, reg.fp());
break;
default:
@@ -843,10 +866,10 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
RecordUsedSpillOffset(offset);
Operand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
- case ValueType::kI32:
+ case kI32:
movl(dst, Immediate(value.to_i32()));
break;
- case ValueType::kI64: {
+ case kI64: {
if (is_int32(value.to_i64())) {
// Sign extend low word.
movq(dst, Immediate(static_cast<int32_t>(value.to_i64())));
@@ -866,8 +889,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
}
}
-void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueType type) {
- liftoff::Load(this, reg, liftoff::GetStackSlot(offset), type);
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ liftoff::Load(this, reg, liftoff::GetStackSlot(offset), kind);
}
void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
@@ -1119,16 +1142,16 @@ void LiftoffAssembler::emit_i32_xori(Register dst, Register lhs, int32_t imm) {
}
namespace liftoff {
-template <ValueType::Kind type>
+template <ValueKind kind>
inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
Register src, Register amount,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->Move(kScratchRegister, src, ValueType::Primitive(type));
- if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type));
+ assm->Move(kScratchRegister, src, kind);
+ if (amount != rcx) assm->Move(rcx, amount, kind);
(assm->*emit_shift)(kScratchRegister);
- assm->Move(rcx, kScratchRegister, ValueType::Primitive(type));
+ assm->Move(rcx, kScratchRegister, kind);
return;
}
@@ -1140,11 +1163,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
- assm->Move(rcx, amount, ValueType::Primitive(type));
+ assm->Move(rcx, amount, kind);
}
// Do the actual shift.
- if (dst != src) assm->Move(dst, src, ValueType::Primitive(type));
+ if (dst != src) assm->Move(dst, src, kind);
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -1154,8 +1177,8 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::shll_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::shll_cl);
}
void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
@@ -1166,8 +1189,8 @@ void LiftoffAssembler::emit_i32_shli(Register dst, Register src,
void LiftoffAssembler::emit_i32_sar(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::sarl_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::sarl_cl);
}
void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
@@ -1178,8 +1201,8 @@ void LiftoffAssembler::emit_i32_sari(Register dst, Register src,
void LiftoffAssembler::emit_i32_shr(Register dst, Register src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI32>(this, dst, src, amount,
- &Assembler::shrl_cl);
+ liftoff::EmitShiftOperation<kI32>(this, dst, src, amount,
+ &Assembler::shrl_cl);
}
void LiftoffAssembler::emit_i32_shri(Register dst, Register src,
@@ -1317,8 +1340,8 @@ void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shlq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shlq_cl);
}
void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
@@ -1329,8 +1352,8 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::sarq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::sarq_cl);
}
void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
@@ -1341,8 +1364,8 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src,
void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src,
Register amount) {
- liftoff::EmitShiftOperation<ValueType::kI64>(this, dst.gp(), src.gp(), amount,
- &Assembler::shrq_cl);
+ liftoff::EmitShiftOperation<kI64>(this, dst.gp(), src.gp(), amount,
+ &Assembler::shrq_cl);
}
void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src,
@@ -2027,27 +2050,28 @@ void LiftoffAssembler::emit_jump(Label* label) { jmp(label); }
void LiftoffAssembler::emit_jump(Register target) { jmp(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
- Label* label, ValueType type,
+ Label* label, ValueKind kind,
Register lhs, Register rhs) {
Condition cond = liftoff::ToCondition(liftoff_cond);
if (rhs != no_reg) {
- switch (type.kind()) {
- case ValueType::kI32:
+ switch (kind) {
+ case kI32:
cmpl(lhs, rhs);
break;
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kRtt:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
V8_FALLTHROUGH;
- case ValueType::kI64:
+ case kI64:
cmpq(lhs, rhs);
break;
default:
UNREACHABLE();
}
} else {
- DCHECK_EQ(type, kWasmI32);
+ DCHECK_EQ(kind, kI32);
testl(lhs, lhs);
}
@@ -2136,12 +2160,12 @@ void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
LiftoffRegister true_value,
LiftoffRegister false_value,
- ValueType type) {
- if (type != kWasmI32 && type != kWasmI64) return false;
+ ValueKind kind) {
+ if (kind != kI32 && kind != kI64) return false;
testl(condition, condition);
- if (type == kWasmI32) {
+ if (kind == kI32) {
if (dst == false_value) {
cmovl(not_zero, dst.gp(), true_value.gp());
} else {
@@ -2311,7 +2335,11 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister src) {
+ LiftoffRegister src,
+ base::Optional<CpuFeature> feature = base::nullopt) {
+ base::Optional<CpuFeatureScope> sse_scope;
+ if (feature.has_value()) sse_scope.emplace(assm, *feature);
+
XMMRegister tmp = kScratchDoubleReg;
assm->xorq(dst.gp(), dst.gp());
assm->Pxor(tmp, tmp);
@@ -2395,6 +2423,25 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
}
}
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ Operand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm);
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ MachineRepresentation rep = type.mem_rep();
+ if (rep == MachineRepresentation::kWord8) {
+ Pextrb(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord16) {
+ Pextrw(dst_op, src.fp(), lane);
+ } else if (rep == MachineRepresentation::kWord32) {
+ S128Store32Lane(dst_op, src.fp(), lane);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kWord64, rep);
+ S128Store64Lane(dst_op, src.fp(), lane);
+ }
+}
+
void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs,
@@ -2436,13 +2483,12 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- XMMRegister mask = kScratchDoubleReg;
- // Out-of-range indices should return 0, add 112 (0x70) so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- TurboAssembler::Move(mask, uint32_t{0x70707070});
- Pshufd(mask, mask, uint8_t{0x0});
- Paddusb(mask, rhs.fp());
- Pshufb(dst.fp(), lhs.fp(), mask);
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
@@ -2658,6 +2704,71 @@ void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
Pcmpeqd(dst.fp(), ref);
}
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdCommutativeBinOp<&Assembler::vpcmpeqq, &Assembler::pcmpeqq>(
+ this, dst, lhs, rhs, SSE4_1);
+ Pcmpeqq(kScratchDoubleReg, kScratchDoubleReg);
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst == lhs.
+ if (dst != lhs) {
+ movdqa(dst.fp(), lhs.fp());
+ }
+ I64x2GtS(dst.fp(), dst.fp(), rhs.fp());
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GtS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movaps(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GtS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ }
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ // Different register alias requirements depending on CpuFeatures supported:
+ if (CpuFeatures::IsSupported(AVX)) {
+ // 1. AVX, no requirements.
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ } else if (CpuFeatures::IsSupported(SSE4_2)) {
+ // 2. SSE4_2, dst != lhs.
+ if (dst == lhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movdqa(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ } else {
+ // 3. Else, dst != lhs && dst != rhs (lhs == rhs is ok).
+ if (dst == lhs || dst == rhs) {
+ // macro-assembler uses kScratchDoubleReg, so don't use it.
+ I64x2GeS(liftoff::kScratchDoubleReg2, lhs.fp(), rhs.fp());
+ movaps(dst.fp(), liftoff::kScratchDoubleReg2);
+ } else {
+ I64x2GeS(dst.fp(), lhs.fp(), rhs.fp());
+ }
+ }
+}
+
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vcmpeqps, &Assembler::cmpeqps>(
@@ -2773,8 +2884,8 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
liftoff::EmitAnyTrue(this, dst, src);
}
@@ -2790,7 +2901,7 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
LiftoffRegister tmp_simd =
GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
// Mask off the unwanted bits before word-shifting.
@@ -2918,7 +3029,7 @@ void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
if (CpuFeatures::IsSupported(AVX)) {
@@ -3017,11 +3128,6 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
@@ -3148,6 +3254,18 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
this, dst, lhs, rhs, base::Optional<CpuFeature>(SSE4_1));
}
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01());
+ Pmaddubsw(dst.fp(), src.fp(), op);
+}
+
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -3175,6 +3293,12 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
/*is_signed=*/false);
}
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+}
+
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3186,11 +3310,6 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
-void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
- LiftoffRegister src) {
- liftoff::EmitAnyTrue(this, dst, src);
-}
-
void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
@@ -3292,6 +3411,18 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001());
+ Pmaddwd(dst.fp(), src.fp(), op);
+}
+
+void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+}
+
namespace liftoff {
// Helper function to check for register aliasing, AVX support, and moves
// registers around before calling the actual macro-assembler function.
@@ -3357,6 +3488,11 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1);
+}
+
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllq, &Assembler::psllq, 6>(this, dst,
@@ -3408,7 +3544,7 @@ void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ static constexpr RegClass tmp_rc = reg_class_for(kS128);
LiftoffRegister tmp1 =
GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, lhs, rhs));
LiftoffRegister tmp2 =
@@ -3465,6 +3601,26 @@ void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
Movmskpd(dst.gp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovsxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2SConvertI32x4High(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovzxdq(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2UConvertI32x4High(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
@@ -3777,6 +3933,21 @@ void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, rhs, lhs);
}
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2pd(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtps2pd(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
// NAN->0
@@ -3860,6 +4031,11 @@ void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
Addps(dst.fp(), kScratchDoubleReg); // Add hi and lo, may round.
}
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtpd2ps(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3932,6 +4108,16 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
I32x4UConvertI16x8High(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3968,6 +4154,11 @@ void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
Pabsd(dst.fp(), src.fp());
}
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ I64x2Abs(dst.fp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -4194,17 +4385,17 @@ void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
ret(static_cast<int>(num_stack_slots * kSystemPointerSize));
}
-void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* args,
const LiftoffRegister* rets,
- ValueType out_argument_type, int stack_bytes,
+ ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
AllocateStackSpace(stack_bytes);
int arg_bytes = 0;
- for (ValueType param_type : sig->parameters()) {
- liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_type);
- arg_bytes += param_type.element_size_bytes();
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, Operand(rsp, arg_bytes), *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
}
DCHECK_LE(arg_bytes, stack_bytes);
@@ -4229,8 +4420,8 @@ void LiftoffAssembler::CallC(const wasm::FunctionSig* sig,
}
// Load potential output value from the buffer on the stack.
- if (out_argument_type != kWasmStmt) {
- liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_type);
+ if (out_argument_kind != kStmt) {
+ liftoff::Load(this, *next_result_reg, Operand(rsp, 0), out_argument_kind);
}
addq(rsp, Immediate(stack_bytes));
@@ -4244,7 +4435,7 @@ void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
near_jmp(addr, RelocInfo::WASM_CALL);
}
-void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig,
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
@@ -4290,12 +4481,12 @@ void LiftoffStackSlots::Construct() {
const LiftoffAssembler::VarState& src = slot.src_;
switch (src.loc()) {
case LiftoffAssembler::VarState::kStack:
- if (src.type() == kWasmI32) {
+ if (src.kind() == kI32) {
// Load i32 values to a register first to ensure they are zero
// extended.
asm_->movl(kScratchRegister, liftoff::GetStackSlot(slot.src_offset_));
asm_->pushq(kScratchRegister);
- } else if (src.type() == kWasmS128) {
+ } else if (src.kind() == kS128) {
// Since offsets are subtracted from sp, we need a smaller offset to
// push the top of a s128 value.
asm_->pushq(liftoff::GetStackSlot(slot.src_offset_ - 8));
@@ -4309,7 +4500,7 @@ void LiftoffStackSlots::Construct() {
}
break;
case LiftoffAssembler::VarState::kRegister:
- liftoff::push(asm_, src.reg(), src.type());
+ liftoff::push(asm_, src.reg(), src.kind());
break;
case LiftoffAssembler::VarState::kIntConst:
asm_->pushq(Immediate(src.i32_const()));