author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/v8/src/wasm
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz

BASELINE: Update Chromium to 85.0.4183.140

Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/v8/src/wasm')
-rw-r--r--  chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 512
-rw-r--r--  chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 667
-rw-r--r--  chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 625
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.cc | 124
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-assembler.h | 104
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.cc | 370
-rw-r--r--  chromium/v8/src/wasm/baseline/liftoff-compiler.h | 2
-rw-r--r--  chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 206
-rw-r--r--  chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 348
-rw-r--r--  chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 174
-rw-r--r--  chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 174
-rw-r--r--  chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 630
-rw-r--r--  chromium/v8/src/wasm/c-api.cc | 146
-rw-r--r--  chromium/v8/src/wasm/c-api.h | 2
-rw-r--r--  chromium/v8/src/wasm/decoder.h | 1
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder-impl.h | 2667
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.cc | 40
-rw-r--r--  chromium/v8/src/wasm/function-body-decoder.h | 9
-rw-r--r--  chromium/v8/src/wasm/function-compiler.cc | 7
-rw-r--r--  chromium/v8/src/wasm/function-compiler.h | 2
-rw-r--r--  chromium/v8/src/wasm/graph-builder-interface.cc | 142
-rw-r--r--  chromium/v8/src/wasm/local-decl-encoder.cc | 25
-rw-r--r--  chromium/v8/src/wasm/memory-tracing.h | 6
-rw-r--r--  chromium/v8/src/wasm/module-compiler.cc | 202
-rw-r--r--  chromium/v8/src/wasm/module-compiler.h | 3
-rw-r--r--  chromium/v8/src/wasm/module-decoder.cc | 296
-rw-r--r--  chromium/v8/src/wasm/module-instantiate.cc | 112
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.cc | 325
-rw-r--r--  chromium/v8/src/wasm/streaming-decoder.h | 212
-rw-r--r--  chromium/v8/src/wasm/struct-types.h | 36
-rw-r--r--  chromium/v8/src/wasm/sync-streaming-decoder.cc | 112
-rw-r--r--  chromium/v8/src/wasm/value-type.h | 330
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.cc | 65
-rw-r--r--  chromium/v8/src/wasm/wasm-code-manager.h | 31
-rw-r--r--  chromium/v8/src/wasm/wasm-constants.h | 22
-rw-r--r--  chromium/v8/src/wasm/wasm-debug-evaluate.cc | 20
-rw-r--r--  chromium/v8/src/wasm/wasm-debug-evaluate.h | 1
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.cc | 725
-rw-r--r--  chromium/v8/src/wasm/wasm-debug.h | 20
-rw-r--r--  chromium/v8/src/wasm/wasm-engine.cc | 85
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.cc | 122
-rw-r--r--  chromium/v8/src/wasm/wasm-external-refs.h | 14
-rw-r--r--  chromium/v8/src/wasm/wasm-feature-flags.h | 33
-rw-r--r--  chromium/v8/src/wasm/wasm-interpreter.cc | 4456
-rw-r--r--  chromium/v8/src/wasm/wasm-interpreter.h | 228
-rw-r--r--  chromium/v8/src/wasm/wasm-js.cc | 216
-rw-r--r--  chromium/v8/src/wasm/wasm-module-builder.cc | 25
-rw-r--r--  chromium/v8/src/wasm/wasm-module.cc | 78
-rw-r--r--  chromium/v8/src/wasm/wasm-module.h | 41
-rw-r--r--  chromium/v8/src/wasm/wasm-objects-inl.h | 54
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.cc | 70
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.h | 88
-rw-r--r--  chromium/v8/src/wasm/wasm-objects.tq | 15
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes-inl.h | 631
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.cc | 592
-rw-r--r--  chromium/v8/src/wasm/wasm-opcodes.h | 127
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.cc | 167
-rw-r--r--  chromium/v8/src/wasm/wasm-subtyping.h | 42
-rw-r--r--  chromium/v8/src/wasm/wasm-value.h | 2
59 files changed, 8179 insertions, 8402 deletions
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index eb91b79ea55..4a9cffb9728 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -332,6 +332,71 @@ inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
}
}
+constexpr int MaskFromNeonDataType(NeonDataType dt) {
+ switch (dt) {
+ case NeonS8:
+ case NeonU8:
+ return 7;
+ case NeonS16:
+ case NeonU16:
+ return 15;
+ case NeonS32:
+ case NeonU32:
+ return 31;
+ case NeonS64:
+ case NeonU64:
+ return 63;
+ }
+}
+
+enum ShiftDirection { kLeft, kRight };
+
+template <ShiftDirection dir = kLeft, NeonDataType dt, NeonSize sz>
+inline void EmitSimdShift(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ constexpr int mask = MaskFromNeonDataType(dt);
+ UseScratchRegisterScope temps(assm);
+ QwNeonRegister tmp = temps.AcquireQ();
+ Register shift = temps.Acquire();
+ assm->and_(shift, rhs.gp(), Operand(mask));
+ assm->vdup(sz, tmp, shift);
+ if (dir == kRight) {
+ assm->vneg(sz, tmp, tmp);
+ }
+ assm->vshl(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), tmp);
+}
+
+template <ShiftDirection dir, NeonDataType dt>
+inline void EmitSimdShiftImmediate(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ // vshr by 0 is not allowed, so check for it, and only move if dst != lhs.
+ int32_t shift = rhs & MaskFromNeonDataType(dt);
+ if (shift) {
+ if (dir == kLeft) {
+ assm->vshl(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), shift);
+ } else {
+ assm->vshr(dt, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), shift);
+ }
+ } else if (dst != lhs) {
+ assm->vmov(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs));
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(assm);
+ DwVfpRegister scratch = temps.AcquireD();
+ assm->vpmax(NeonU32, scratch, src.low_fp(), src.high_fp());
+ assm->vpmax(NeonU32, scratch, scratch, scratch);
+ assm->ExtractLane(dst.gp(), scratch, NeonS32, 0);
+ assm->cmp(dst.gp(), Operand(0));
+ assm->mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
} // namespace liftoff
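
The new helpers implement wasm's variable SIMD shifts: the shift count is masked to the lane width and, for right shifts, negated so that a single vshl covers both directions. A rough scalar sketch of the intended per-lane behavior (standard wasm SIMD semantics assumed; this code is illustrative and not part of the patch):

#include <cstdint>

// Per-lane reference for i32x4.shl / i32x4.shr_s; other lane widths mask
// the count with (lane_bits - 1) in the same way.
uint32_t i32x4_shl_lane(uint32_t lane, uint32_t count) {
  return lane << (count & 31);
}
int32_t i32x4_shr_s_lane(int32_t lane, uint32_t count) {
  return lane >> (count & 31);  // arithmetic shift for the signed variant
}
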
int LiftoffAssembler::PrepareStackFrame() {
@@ -437,7 +502,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case ValueType::kF64: {
- Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
@@ -1171,7 +1236,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -1216,7 +1281,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
// The scratch register will be required by str if multiple instructions
// are required to encode the offset, and so we cannot use it in that case.
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
- src = GetUnusedRegister(kGpReg).gp();
+ src = GetUnusedRegister(kGpReg, {}).gp();
} else {
src = temps.Acquire();
}
@@ -1758,7 +1823,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovLow(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1777,7 +1842,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1862,6 +1927,38 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
b(trap, ge);
return true;
}
+ case kExprI32SConvertSatF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32UConvertSatF32: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f32(
+ scratch_f,
+ liftoff::GetFloatRegister(src.fp())); // f32 -> u32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32SConvertSatF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_s32_f64(scratch_f, src.fp()); // f64 -> i32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
+ case kExprI32UConvertSatF64: {
+ UseScratchRegisterScope temps(this);
+ SwVfpRegister scratch_f = temps.AcquireS();
+ vcvt_u32_f64(scratch_f, src.fp()); // f64 -> u32 round to zero.
+ vmov(dst.gp(), scratch_f);
+ return true;
+ }
case kExprI32ReinterpretF32:
vmov(dst.gp(), liftoff::GetFloatRegister(src.fp()));
return true;
@@ -1914,10 +2011,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64UConvertI64:
case kExprI64SConvertF32:
case kExprI64UConvertF32:
+ case kExprI64SConvertSatF32:
+ case kExprI64UConvertSatF32:
case kExprF32SConvertI64:
case kExprF32UConvertI64:
case kExprI64SConvertF64:
case kExprI64UConvertF64:
+ case kExprI64SConvertSatF64:
+ case kExprI64UConvertSatF64:
// These cases can be handled by the C fallback function.
return false;
default:
@@ -2052,6 +2153,79 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ Register actual_src_addr = liftoff::CalculateActualAddress(
+ this, &temps, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ vld1(Neon8, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS8, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint8()) {
+ vld1(Neon8, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU8, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Int16()) {
+ vld1(Neon16, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS16, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint16()) {
+ vld1(Neon16, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU16, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Int32()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonS32, liftoff::GetSimd128Register(dst), dst.low_fp());
+ } else if (memtype == MachineType::Uint32()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ vmovl(NeonU32, liftoff::GetSimd128Register(dst), dst.low_fp());
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ vld1r(Neon8, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int16()) {
+ vld1r(Neon16, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int32()) {
+ vld1r(Neon32, NeonListOperand(liftoff::GetSimd128Register(dst)),
+ NeonMemOperand(actual_src_addr));
+ } else if (memtype == MachineType::Int64()) {
+ vld1(Neon32, NeonListOperand(dst.low_fp()),
+ NeonMemOperand(actual_src_addr));
+ TurboAssembler::Move(dst.high_fp(), dst.low_fp());
+ }
+ }
+}
+
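LoadTransform covers the two v128 load transforms: kExtend loads 64 bits and widens each lane to twice its size (vmovl), kSplat loads one scalar and replicates it into every lane (vld1r). A scalar sketch of the two shapes, assuming the usual v128.load8x8_s / v128.load32_splat semantics (illustrative only):

#include <cstdint>
#include <cstring>

void load8x8_s(const uint8_t* mem, int16_t out[8]) {
  for (int i = 0; i < 8; ++i) out[i] = static_cast<int8_t>(mem[i]);  // sign-extend each byte
}

void load32_splat(const uint8_t* mem, uint32_t out[4]) {
  uint32_t x;
  std::memcpy(&x, mem, sizeof(x));         // scalar load (may be unaligned)
  for (int i = 0; i < 4; ++i) out[i] = x;  // replicate into all four lanes
}
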
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ UseScratchRegisterScope temps(this);
+
+ NeonListOperand table(liftoff::GetSimd128Register(lhs));
+ if (dst == lhs) {
+ // dst will be overwritten, so keep the table somewhere else.
+ QwNeonRegister tbl = temps.AcquireQ();
+ TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs));
+ table = NeonListOperand(tbl);
+ }
+
+ vtbl(dst.low_fp(), table, rhs.low_fp());
+ vtbl(dst.high_fp(), table, rhs.high_fp());
+}
+
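vtbl already matches the wasm swizzle rule that out-of-range indices select zero; the extra copy above is only needed when dst aliases the table register. Scalar sketch of s8x16.swizzle (illustrative only):

#include <cstdint>

void s8x16_swizzle(const uint8_t table[16], const uint8_t idx[16],
                   uint8_t out[16]) {
  for (int i = 0; i < 16; ++i) {
    out[i] = idx[i] < 16 ? table[idx[i]] : 0;  // out-of-range lanes become 0
  }
}
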
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
TurboAssembler::Move(dst.low_fp(), src.fp());
@@ -2273,12 +2447,37 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS64, Neon32>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
+ vshl(NeonS64, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS64, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS64>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU64, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU64>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2306,15 +2505,18 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
QwNeonRegister tmp1 = left;
QwNeonRegister tmp2 = right;
- if (cache_state()->is_used(lhs) && cache_state()->is_used(rhs)) {
+ LiftoffRegList used_plus_dst =
+ cache_state()->used_registers | LiftoffRegList::ForRegs(dst);
+
+ if (used_plus_dst.has(lhs) && used_plus_dst.has(rhs)) {
tmp1 = temps.AcquireQ();
// We only have 1 scratch Q register, so acquire another ourselves.
LiftoffRegList pinned = LiftoffRegList::ForRegs(dst);
LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
tmp2 = liftoff::GetSimd128Register(unused_pair);
- } else if (cache_state()->is_used(lhs)) {
+ } else if (used_plus_dst.has(lhs)) {
tmp1 = temps.AcquireQ();
- } else if (cache_state()->is_used(rhs)) {
+ } else if (used_plus_dst.has(rhs)) {
tmp2 = temps.AcquireQ();
}
@@ -2363,14 +2565,79 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU32, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU32, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS32, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS32, tmp, liftoff::GetSimd128Register(src), 31);
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ vmov(mask.low(), Double((uint64_t)0x0000'0002'0000'0001));
+ vmov(mask.high(), Double((uint64_t)0x0000'0008'0000'0004));
+ vand(tmp, mask, tmp);
+ vpadd(Neon32, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon32, tmp.low(), tmp.low(), kDoubleRegZero);
+ VmovLow(dst.gp(), tmp.low());
+}
+
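The bitmask sequence turns each sign bit into an all-ones lane (vshr by 31), masks lane i down to the value 1 << i, and sums the lanes with pairwise adds. Its per-lane meaning, as a scalar sketch (standard wasm SIMD semantics assumed; illustrative only):

#include <cstdint>

uint32_t i32x4_bitmask(const int32_t lanes[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    if (lanes[i] < 0) mask |= uint32_t{1} << i;  // bit i = sign bit of lane i
  }
  return mask;
}
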
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS32, Neon32>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
+ vshl(NeonS32, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS32, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS32>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU32, Neon32>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU32>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2430,14 +2697,81 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU16, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU16, scratch, scratch, scratch);
+ vpmin(NeonU16, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS16, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS16, tmp, liftoff::GetSimd128Register(src), 15);
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ vmov(mask.low(), Double((uint64_t)0x0008'0004'0002'0001));
+ vmov(mask.high(), Double((uint64_t)0x0080'0040'0020'0010));
+ vand(tmp, mask, tmp);
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vmov(NeonU16, dst.gp(), tmp.low(), 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS16, Neon16>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
+ vshl(NeonS16, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS16, Neon16>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS16>(this, dst, lhs,
+ rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU16, Neon16>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU16>(this, dst, lhs,
+ rhs);
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2537,6 +2871,60 @@ void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
imm_lane_idx);
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ Simd128Register dest = liftoff::GetSimd128Register(dst);
+ Simd128Register src1 = liftoff::GetSimd128Register(lhs);
+ Simd128Register src2 = liftoff::GetSimd128Register(rhs);
+ UseScratchRegisterScope temps(this);
+ Simd128Register scratch = temps.AcquireQ();
+ if ((src1 != src2) && src1.code() + 1 != src2.code()) {
+ // vtbl requires the operands to be consecutive or the same.
+ // If they are the same, we build a smaller list operand (table_size = 2).
+ // If they are not the same, and not consecutive, we move the src1 and src2
+ // to q14 and q15, which will be unused since they are not allocatable in
+ // Liftoff. If the operands are the same, then we build a smaller list
+ // operand below.
+ static_assert(!(kLiftoffAssemblerFpCacheRegs &
+ (d28.bit() | d29.bit() | d30.bit() | d31.bit())),
+ "This only works if q14-q15 (d28-d31) are not used.");
+ vmov(q14, src1);
+ src1 = q14;
+ vmov(q15, src2);
+ src2 = q15;
+ }
+
+ int table_size = src1 == src2 ? 2 : 4;
+ uint32_t mask = table_size == 2 ? 0x0F0F0F0F : 0x1F1F1F1F;
+
+ int scratch_s_base = scratch.code() * 4;
+ for (int j = 0; j < 4; j++) {
+ uint32_t imm = 0;
+ for (int i = 3; i >= 0; i--) {
+ imm = (imm << 8) | shuffle[j * 4 + i];
+ }
+ uint32_t four_lanes = imm;
+ // Ensure indices are in [0,15] if table_size is 2, or [0,31] if 4.
+ four_lanes &= mask;
+ vmov(SwVfpRegister::from_code(scratch_s_base + j),
+ Float32::FromBits(four_lanes));
+ }
+
+ DwVfpRegister table_base = src1.low();
+ NeonListOperand table(table_base, table_size);
+
+ if (dest != src1 && dest != src2) {
+ vtbl(dest.low(), table, scratch.low());
+ vtbl(dest.high(), table, scratch.high());
+ } else {
+ vtbl(scratch.low(), table, scratch.low());
+ vtbl(scratch.high(), table, scratch.high());
+ vmov(dest, scratch);
+ }
+}
+

void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
vdup(Neon8, liftoff::GetSimd128Register(dst), src.gp());
@@ -2569,14 +2957,82 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
liftoff::GetSimd128Register(src));
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ DwVfpRegister scratch = temps.AcquireD();
+ vpmin(NeonU8, scratch, src.low_fp(), src.high_fp());
+ vpmin(NeonU8, scratch, scratch, scratch);
+ vpmin(NeonU8, scratch, scratch, scratch);
+ vpmin(NeonU8, scratch, scratch, scratch);
+ ExtractLane(dst.gp(), scratch, NeonS8, 0);
+ cmp(dst.gp(), Operand(0));
+ mov(dst.gp(), Operand(1), LeaveCC, ne);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ Simd128Register tmp = liftoff::GetSimd128Register(src);
+ Simd128Register mask = temps.AcquireQ();
+
+ if (cache_state()->is_used(src)) {
+ // We only have 1 scratch Q register, so try and reuse src.
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src);
+ LiftoffRegister unused_pair = GetUnusedRegister(kFpRegPair, pinned);
+ mask = liftoff::GetSimd128Register(unused_pair);
+ }
+
+ vshr(NeonS8, tmp, liftoff::GetSimd128Register(src), 7);
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ vmov(mask.low(), Double((uint64_t)0x8040'2010'0804'0201));
+ vmov(mask.high(), Double((uint64_t)0x8040'2010'0804'0201));
+ vand(tmp, mask, tmp);
+ vext(mask, tmp, tmp, 8);
+ vzip(Neon8, mask, tmp);
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.high());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vpadd(Neon16, tmp.low(), tmp.low(), tmp.low());
+ vmov(NeonU16, dst.gp(), tmp.low(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
+ liftoff::EmitSimdShift<liftoff::kLeft, NeonS8, Neon8>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
+ vshl(NeonS8, liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(lhs), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonS8, Neon8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonS8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::kRight, NeonU8, Neon8>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftImmediate<liftoff::kRight, NeonU8>(this, dst, lhs, rhs);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2842,6 +3298,30 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
liftoff::GetSimd128Register(src2));
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_s32_f32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_u32_f32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_f32_s32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ vcvt_f32_u32(liftoff::GetSimd128Register(dst),
+ liftoff::GetSimd128Register(src));
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 9c142e4ad0f..03643c6edd7 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -104,6 +104,76 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm,
return MemOperand(addr.X(), offset_imm);
}
+enum class ShiftDirection : bool { kLeft, kRight };
+
+enum class ShiftSign : bool { kSigned, kUnsigned };
+
+template <ShiftDirection dir, ShiftSign sign = ShiftSign::kSigned>
+inline void EmitSimdShift(LiftoffAssembler* assm, VRegister dst, VRegister lhs,
+ Register rhs, VectorFormat format) {
+ DCHECK_IMPLIES(dir == ShiftDirection::kLeft, sign == ShiftSign::kSigned);
+ DCHECK(dst.IsSameFormat(lhs));
+ DCHECK_EQ(dst.LaneCount(), LaneCountFromFormat(format));
+
+ UseScratchRegisterScope temps(assm);
+ VRegister tmp = temps.AcquireV(format);
+ Register shift = dst.Is2D() ? temps.AcquireX() : temps.AcquireW();
+ int mask = LaneSizeInBitsFromFormat(format) - 1;
+ assm->And(shift, rhs, mask);
+ assm->Dup(tmp, shift);
+
+ if (dir == ShiftDirection::kRight) {
+ assm->Neg(tmp, tmp);
+ }
+
+ if (sign == ShiftSign::kSigned) {
+ assm->Sshl(dst, lhs, tmp);
+ } else {
+ assm->Ushl(dst, lhs, tmp);
+ }
+}
+
+template <VectorFormat format, ShiftSign sign>
+inline void EmitSimdShiftRightImmediate(LiftoffAssembler* assm, VRegister dst,
+ VRegister lhs, int32_t rhs) {
+  // Sshr and Ushr do not allow shifts to be 0, so check for that here.
+ int mask = LaneSizeInBitsFromFormat(format) - 1;
+ int32_t shift = rhs & mask;
+ if (!shift) {
+ if (dst != lhs) {
+ assm->Mov(dst, lhs);
+ }
+ return;
+ }
+
+ if (sign == ShiftSign::kSigned) {
+ assm->Sshr(dst, lhs, rhs & mask);
+ } else {
+ assm->Ushr(dst, lhs, rhs & mask);
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ // AnyTrue does not depend on the number of lanes, so we can use V4S for all.
+ UseScratchRegisterScope scope(assm);
+ VRegister temp = scope.AcquireV(kFormatS);
+ assm->Umaxv(temp, src.fp().V4S());
+ assm->Umov(dst.gp().W(), temp, 0);
+ assm->Cmp(dst.gp().W(), 0);
+ assm->Cset(dst.gp().W(), ne);
+}
+
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src, VectorFormat format) {
+ UseScratchRegisterScope scope(assm);
+ VRegister temp = scope.AcquireV(ScalarFormatFromFormat(format));
+ assm->Uminv(temp, VRegister::Create(src.fp().code(), format));
+ assm->Umov(dst.gp().W(), temp, 0);
+ assm->Cmp(dst.gp().W(), 0);
+ assm->Cset(dst.gp().W(), ne);
+}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -299,8 +369,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
Ldr(dst.fp().Q(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -337,65 +405,280 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Str(src.fp().Q(), dst_op);
break;
+ }
+}
+
+namespace liftoff {
+#define __ lasm->
+
+inline Register CalculateActualAddress(LiftoffAssembler* lasm,
+ Register addr_reg, Register offset_reg,
+ int32_t offset_imm,
+ Register result_reg) {
+ DCHECK_NE(offset_reg, no_reg);
+ DCHECK_NE(addr_reg, no_reg);
+ __ Add(result_reg, addr_reg, Operand(offset_reg));
+ if (offset_imm != 0) {
+ __ Add(result_reg, result_reg, Operand(offset_imm));
+ }
+ return result_reg;
+}
+
+enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
+
+inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LiftoffRegister value, LiftoffRegister result,
+ StoreType type, Binop op) {
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Make sure that {result} is unique.
+ Register result_reg = result.gp();
+ if (result_reg == value.gp() || result_reg == dst_addr ||
+ result_reg == offset_reg) {
+ result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to be
+ // the same register.
+ Register temp = temps.AcquireX();
+
+ Label retry;
+ __ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ ldaxrb(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ ldaxrh(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ ldaxr(result_reg.W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ ldaxr(result_reg.X(), actual_addr);
+ break;
default:
UNREACHABLE();
}
+
+ switch (op) {
+ case Binop::kAdd:
+ __ add(temp, result_reg, value.gp());
+ break;
+ case Binop::kSub:
+ __ sub(temp, result_reg, value.gp());
+ break;
+ case Binop::kAnd:
+ __ and_(temp, result_reg, value.gp());
+ break;
+ case Binop::kOr:
+ __ orr(temp, result_reg, value.gp());
+ break;
+ case Binop::kXor:
+ __ eor(temp, result_reg, value.gp());
+ break;
+ case Binop::kExchange:
+ __ mov(temp, value.gp());
+ break;
+ }
+
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ stlxrb(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ stlxrh(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ stlxr(store_result.W(), temp.W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ stlxr(store_result.W(), temp.X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ Cbnz(store_result.W(), &retry);
+
+ if (result_reg != result.gp()) {
+ __ mov(result.gp(), result_reg);
+ }
}
+#undef __
+} // namespace liftoff
+
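AtomicBinop above emits a classic load-linked/store-conditional retry loop: load-acquire the old value, apply the operation into a temporary, store-release conditionally, and retry while the store fails; the value handed back to wasm is the one observed before the update. A minimal C++ sketch of that shape (illustrative only; compare_exchange_weak stands in for the stlxr that may fail):

#include <atomic>
#include <cstdint>

uint32_t atomic_rmw_add(std::atomic<uint32_t>* addr, uint32_t value) {
  uint32_t old = addr->load();
  while (!addr->compare_exchange_weak(old, old + value)) {
    // On failure {old} is reloaded and the sum recomputed, like the retry
    // branch taken when stlxr reports a lost reservation.
  }
  return old;  // previous memory contents, like {result} above
}
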
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uint32_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ Register src_reg = liftoff::CalculateActualAddress(
+ this, src_addr, offset_reg, offset_imm, temps.AcquireX());
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ldarb(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ Ldarh(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32U:
+ Ldar(dst.gp().W(), src_reg);
+ return;
+ case LoadType::kI64Load:
+ Ldar(dst.gp().X(), src_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ UseScratchRegisterScope temps(this);
+ Register dst_reg = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ Stlrb(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ Stlrh(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ Stlr(src.gp().W(), dst_reg);
+ return;
+ case StoreType::kI64Store:
+ Stlr(src.gp().X(), dst_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uint32_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kExchange);
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
+
+ Register result_reg = result.gp();
+ if (pinned.has(result)) {
+ result_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(this);
+ Register store_result = temps.AcquireW();
+
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.AcquireX());
+
+ Label retry;
+ Label done;
+ Bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ ldaxrb(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTB));
+ B(ne, &done);
+ stlxrb(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ ldaxrh(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTH));
+ B(ne, &done);
+ stlxrh(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ ldaxr(result_reg.W(), actual_addr);
+ Cmp(result.gp().W(), Operand(expected.gp().W(), UXTW));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().W(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ ldaxr(result_reg.X(), actual_addr);
+ Cmp(result.gp().X(), Operand(expected.gp().X(), UXTX));
+ B(ne, &done);
+ stlxr(store_result.W(), new_value.gp().X(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Cbnz(store_result.W(), &retry);
+ Bind(&done);
+
+ if (result_reg != result.gp()) {
+ mov(result.gp(), result_reg);
+ }
}
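The compare-exchange loop follows the same load-linked/store-conditional pattern, but stores only when the loaded value matches {expected}; either way the loaded value is what wasm receives. Sketch (illustrative only):

#include <atomic>
#include <cstdint>

uint32_t atomic_cmpxchg(std::atomic<uint32_t>* addr, uint32_t expected,
                        uint32_t new_value) {
  // On failure, compare_exchange_strong writes the observed value back into
  // {expected}; on success it already equals the observed value.
  addr->compare_exchange_strong(expected, new_value);
  return expected;
}
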
void LiftoffAssembler::AtomicFence() { Dmb(InnerShareable, BarrierAll); }
@@ -439,7 +722,7 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
Fmov(dst.D(), src.D());
} else {
DCHECK_EQ(kWasmS128, type);
- Fmov(dst.Q(), src.Q());
+ Mov(dst.Q(), src.Q());
}
}
@@ -921,6 +1204,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
B(trap, ne);
return true;
}
+ case kExprI32SConvertSatF32:
+ Fcvtzs(dst.gp().W(), src.fp().S());
+ return true;
+ case kExprI32UConvertSatF32:
+ Fcvtzu(dst.gp().W(), src.fp().S());
+ return true;
+ case kExprI32SConvertSatF64:
+ Fcvtzs(dst.gp().W(), src.fp().D());
+ return true;
+ case kExprI32UConvertSatF64:
+ Fcvtzu(dst.gp().W(), src.fp().D());
+ return true;
+ case kExprI64SConvertSatF32:
+ Fcvtzs(dst.gp().X(), src.fp().S());
+ return true;
+ case kExprI64UConvertSatF32:
+ Fcvtzu(dst.gp().X(), src.fp().S());
+ return true;
+ case kExprI64SConvertSatF64:
+ Fcvtzs(dst.gp().X(), src.fp().D());
+ return true;
+ case kExprI64UConvertSatF64:
+ Fcvtzu(dst.gp().X(), src.fp().D());
+ return true;
case kExprI32ReinterpretF32:
Fmov(dst.gp().W(), src.fp().S());
return true;
@@ -1102,6 +1409,70 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
}
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ MemOperand src_op =
+ liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V8H(), dst.fp().V8B());
+ } else if (memtype == MachineType::Uint8()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V8H(), dst.fp().V8B());
+ } else if (memtype == MachineType::Int16()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V4S(), dst.fp().V4H());
+ } else if (memtype == MachineType::Uint16()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V4S(), dst.fp().V4H());
+ } else if (memtype == MachineType::Int32()) {
+ Ldr(dst.fp().D(), src_op);
+ Sxtl(dst.fp().V2D(), dst.fp().V2S());
+ } else if (memtype == MachineType::Uint32()) {
+ Ldr(dst.fp().D(), src_op);
+ Uxtl(dst.fp().V2D(), dst.fp().V2S());
+ }
+ } else {
+ // ld1r only allows no offset or post-index, so emit an add.
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (src_op.IsRegisterOffset()) {
+ // We have 2 tmp gps, so it's okay to acquire 1 more here, and actually
+ // doesn't matter if we acquire the same one.
+ Register tmp = temps.AcquireX();
+ Add(tmp, src_op.base(), src_op.regoffset().X());
+ src_op = MemOperand(tmp.X(), 0);
+ } else if (src_op.IsImmediateOffset() && src_op.offset() != 0) {
+ Register tmp = temps.AcquireX();
+ Add(tmp, src_op.base(), src_op.offset());
+ src_op = MemOperand(tmp.X(), 0);
+ }
+
+ if (memtype == MachineType::Int8()) {
+ ld1r(dst.fp().V16B(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ ld1r(dst.fp().V8H(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ ld1r(dst.fp().V4S(), src_op);
+ } else if (memtype == MachineType::Int64()) {
+ ld1r(dst.fp().V2D(), src_op);
+ }
+ }
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Tbl(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B());
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V2D(), src.fp().D(), 0);
@@ -1262,12 +1633,42 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
+ Shl(dst.fp().V2D(), lhs.fp().V2D(), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat2D, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs.gp(), kFormat2D);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat2D,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V2D(), lhs.fp().V2D(), rhs);
}
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1327,14 +1728,69 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
Neg(dst.fp().V4S(), src.fp().V4S());
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ Sshr(tmp.V4S(), src.fp().V4S(), 31);
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ Addv(tmp.S(), tmp.V4S());
+ Mov(dst.gp().W(), tmp.V4S(), 0);
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
+ Shl(dst.fp().V4S(), lhs.fp().V4S(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat4S, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs.gp(), kFormat4S);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat4S,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V4S(), lhs.fp().V4S(), rhs);
}
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1408,14 +1864,69 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
Neg(dst.fp().V8H(), src.fp().V8H());
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ Sshr(tmp.V8H(), src.fp().V8H(), 15);
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ Addv(tmp.H(), tmp.V8H());
+ Mov(dst.gp().W(), tmp.V8H(), 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
+ Shl(dst.fp().V8H(), lhs.fp().V8H(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat8H, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs.gp(), kFormat8H);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat8H,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V8H(), lhs.fp().V8H(), rhs);
}
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1481,6 +1992,45 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
Umax(dst.fp().V8H(), lhs.fp().V8H(), rhs.fp().V8H());
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ VRegister src1 = lhs.fp();
+ VRegister src2 = rhs.fp();
+ VRegister temp = dst.fp();
+ if (dst == lhs || dst == rhs) {
+ // dst overlaps with lhs or rhs, so we need a temporary.
+ temp = GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ }
+
+ UseScratchRegisterScope scope(this);
+
+ if (src1 != src2 && !AreConsecutive(src1, src2)) {
+ // Tbl needs consecutive registers, which our scratch registers are.
+ src1 = scope.AcquireV(kFormat16B);
+ src2 = scope.AcquireV(kFormat16B);
+ DCHECK(AreConsecutive(src1, src2));
+ Mov(src1.Q(), lhs.fp().Q());
+ Mov(src2.Q(), rhs.fp().Q());
+ }
+
+ uint8_t mask = lhs == rhs ? 0x0F : 0x1F;
+ int64_t imms[2] = {0, 0};
+ for (int i = 7; i >= 0; i--) {
+ imms[0] = (imms[0] << 8) | (shuffle[i] & mask);
+ imms[1] = (imms[1] << 8) | (shuffle[i + 8] & mask);
+ }
+
+ Movi(temp.V16B(), imms[1], imms[0]);
+
+ if (src1 == src2) {
+ Tbl(dst.fp().V16B(), src1.V16B(), temp.V16B());
+ } else {
+ Tbl(dst.fp().V16B(), src1.V16B(), src2.V16B(), temp.V16B());
+ }
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Dup(dst.fp().V16B(), src.gp().W());
@@ -1513,14 +2063,71 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
Neg(dst.fp().V16B(), src.fp().V16B());
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ UseScratchRegisterScope temps(this);
+ VRegister tmp = temps.AcquireQ();
+ VRegister mask = temps.AcquireQ();
+
+ // Set i-th bit of each lane i. When AND with tmp, the lanes that
+ // are signed will have i-th bit set, unsigned will be 0.
+ Sshr(tmp.V16B(), src.fp().V16B(), 7);
+ Movi(mask.V2D(), 0x8040'2010'0804'0201);
+ And(tmp.V16B(), mask.V16B(), tmp.V16B());
+ Ext(mask.V16B(), tmp.V16B(), tmp.V16B(), 8);
+ Zip1(tmp.V16B(), tmp.V16B(), mask.V16B());
+ Addv(tmp.H(), tmp.V8H());
+ Mov(dst.gp().W(), tmp.V8H(), 0);
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kLeft>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
+ Shl(dst.fp().V16B(), lhs.fp().V16B(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat16B, liftoff::ShiftSign::kSigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShift<liftoff::ShiftDirection::kRight,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs.gp(), kFormat16B);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftRightImmediate<kFormat16B,
+ liftoff::ShiftSign::kUnsigned>(
+ this, dst.fp().V16B(), lhs.fp().V16B(), rhs);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1750,6 +2357,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
Bsl(dst.fp().V16B(), src1.fp().V16B(), src2.fp().V16B());
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzs(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Fcvtzu(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Scvtf(dst.fp().V4S(), src.fp().V4S());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Ucvtf(dst.fp().V4S(), src.fp().V4S());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7a1d629bf2d..468450aef66 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
}
inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -336,8 +336,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
movdqu(dst.fp(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -405,8 +403,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Movdqu(dst_op, src.fp());
break;
- default:
- UNREACHABLE();
}
}
@@ -494,7 +490,56 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uint32_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ if (type.value() == StoreType::kI64Store) {
+ bailout(kAtomics, "AtomicAdd");
+ return;
+ }
+
+ DCHECK_EQ(value, result);
+ DCHECK(!cache_state()->is_used(result));
+ bool is_64_bit_op = type.value_type() == kWasmI64;
+
+ Register value_reg = is_64_bit_op ? value.low_gp() : value.gp();
+ Register result_reg = is_64_bit_op ? result.low_gp() : result.gp();
+
+ bool is_byte_store = type.size() == 1;
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, value_reg, offset_reg);
+
+ // Ensure that {value_reg} is a valid register.
+ if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
+ Register safe_value_reg =
+ GetUnusedRegister(liftoff::kByteRegs, pinned).gp();
+ mov(safe_value_reg, value_reg);
+ value_reg = safe_value_reg;
+ }
+
+ Operand dst_op = Operand(dst_addr, offset_reg, times_1, offset_imm);
+ lock();
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ xadd_b(dst_op, value_reg);
+ movzx_b(result_reg, value_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ xadd_w(dst_op, value_reg);
+ movzx_w(result_reg, value_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ xadd(dst_op, value_reg);
+ if (value_reg != result_reg) {
+ mov(result_reg, value_reg);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (is_64_bit_op) {
+ xor_(result.high_gp(), result.high_gp());
+ }
}
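
The sequence above lowers the 8/16/32-bit atomic adds to a single lock-prefixed xadd, which atomically adds the register into memory and leaves the previous memory value in that register; narrow results are then zero-extended and, for the i64 variants, the high result word is cleared (the full 64-bit case still bails out). A scalar sketch of the semantics using standard C++ atomics (illustrative only, the names are not part of the patch):

#include <atomic>
#include <cstdint>

// i32.atomic.rmw.add: atomically add `value` to *addr and return the old
// value, which is exactly what the lock xadd above computes.
uint32_t AtomicAdd32(std::atomic<uint32_t>* addr, uint32_t value) {
  return addr->fetch_add(value, std::memory_order_seq_cst);
}

// The narrow i64 variants (i64.atomic.rmw{8,16,32}.add_u) return the old value
// zero-extended to 64 bits, which is why the high result register is xor'ed.
uint64_t AtomicAdd64Narrow32(std::atomic<uint32_t>* addr, uint32_t value) {
  return static_cast<uint64_t>(addr->fetch_add(value, std::memory_order_seq_cst));
}
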
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
@@ -1349,7 +1394,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,
// We need one tmp register to extract the sign bit. Get it right at the
// beginning, such that the spilling code is not accidentally jumped over.
- Register tmp = assm->GetUnusedRegister(kGpReg).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp();
#define dop(name, ...) \
do { \
@@ -1412,9 +1457,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
static constexpr int kF32SignBit = 1 << 31;
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Movd(scratch, lhs); // move {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
Movd(scratch2, rhs); // move {rhs} into {scratch2}.
@@ -1541,9 +1586,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
static constexpr int kF32SignBit = 1 << 31;
// On ia32, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
- GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
@@ -1612,6 +1657,7 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
}
namespace liftoff {
+#define __ assm->
// Used for float to int conversions. If the value in {converted_back} equals
// {src} afterwards, the conversion succeeded.
template <typename dst_type, typename src_type>
@@ -1621,21 +1667,21 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
LiftoffRegList pinned) {
if (std::is_same<double, src_type>::value) { // f64
if (std::is_signed<dst_type>::value) { // f64 -> i32
- assm->cvttsd2si(dst, src);
- assm->Cvtsi2sd(converted_back, dst);
+ __ cvttsd2si(dst, src);
+ __ Cvtsi2sd(converted_back, dst);
} else { // f64 -> u32
- assm->Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
- assm->Cvtui2sd(converted_back, dst,
- assm->GetUnusedRegister(kGpReg, pinned).gp());
+ __ Cvttsd2ui(dst, src, liftoff::kScratchDoubleReg);
+ __ Cvtui2sd(converted_back, dst,
+ __ GetUnusedRegister(kGpReg, pinned).gp());
}
} else { // f32
if (std::is_signed<dst_type>::value) { // f32 -> i32
- assm->cvttss2si(dst, src);
- assm->Cvtsi2ss(converted_back, dst);
+ __ cvttss2si(dst, src);
+ __ Cvtsi2ss(converted_back, dst);
} else { // f32 -> u32
- assm->Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
- assm->Cvtui2ss(converted_back, dst,
- assm->GetUnusedRegister(kGpReg, pinned).gp());
+ __ Cvttss2ui(dst, src, liftoff::kScratchDoubleReg);
+ __ Cvtui2ss(converted_back, dst,
+ __ GetUnusedRegister(kGpReg, pinned).gp());
}
}
}
@@ -1644,36 +1690,101 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout(kMissingCPUFeature, "no SSE4.1");
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
DoubleRegister rounded =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
DoubleRegister converted_back =
- pinned.set(assm->GetUnusedRegister(kFpReg, pinned)).fp();
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
if (std::is_same<double, src_type>::value) { // f64
- assm->roundsd(rounded, src, kRoundToZero);
+ __ roundsd(rounded, src, kRoundToZero);
} else { // f32
- assm->roundss(rounded, src, kRoundToZero);
+ __ roundss(rounded, src, kRoundToZero);
}
ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
converted_back, pinned);
if (std::is_same<double, src_type>::value) { // f64
- assm->ucomisd(converted_back, rounded);
+ __ ucomisd(converted_back, rounded);
} else { // f32
- assm->ucomiss(converted_back, rounded);
+ __ ucomiss(converted_back, rounded);
}
// Jump to trap if PF is 0 (one of the operands was NaN) or they are not
// equal.
- assm->j(parity_even, trap);
- assm->j(not_equal, trap);
+ __ j(parity_even, trap);
+ __ j(not_equal, trap);
return true;
}
+
+template <typename dst_type, typename src_type>
+inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
+ Label done;
+ Label not_nan;
+ Label src_positive;
+
+ LiftoffRegList pinned = LiftoffRegList::ForRegs(src, dst);
+ DoubleRegister rounded =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+ DoubleRegister converted_back =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+ DoubleRegister zero_reg =
+ pinned.set(__ GetUnusedRegister(kFpReg, pinned)).fp();
+
+ if (std::is_same<double, src_type>::value) { // f64
+ __ roundsd(rounded, src, kRoundToZero);
+ } else { // f32
+ __ roundss(rounded, src, kRoundToZero);
+ }
+
+ ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
+ converted_back, pinned);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ ucomisd(converted_back, rounded);
+ } else { // f32
+ __ ucomiss(converted_back, rounded);
+ }
+
+ // Return 0 if PF is 0 (one of the operands was NaN)
+ __ j(parity_odd, &not_nan);
+ __ xor_(dst, dst);
+ __ jmp(&done);
+
+ __ bind(&not_nan);
+ // If rounding is as expected, return result
+ __ j(equal, &done);
+
+ __ Xorpd(zero_reg, zero_reg);
+
+ // if out-of-bounds, check if src is positive
+ if (std::is_same<double, src_type>::value) { // f64
+ __ ucomisd(src, zero_reg);
+ } else { // f32
+ __ ucomiss(src, zero_reg);
+ }
+ __ j(above, &src_positive);
+ __ mov(dst, Immediate(std::numeric_limits<dst_type>::min()));
+ __ jmp(&done);
+
+ __ bind(&src_positive);
+
+ __ mov(dst, Immediate(std::numeric_limits<dst_type>::max()));
+
+ __ bind(&done);
+ return true;
+}
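
EmitTruncateFloatToInt and the new EmitSatTruncateFloatToInt share the round-toward-zero, convert, convert-back pattern; they differ in what happens when the round trip fails: the trapping variant jumps to the trap label, while the saturating variant returns 0 for NaN and clamps out-of-range inputs to the destination type's min or max depending on the sign of the source. A scalar sketch of the saturating path (illustrative only, not V8 code):

#include <cmath>
#include <cstdint>
#include <limits>

// Scalar model of the saturating branches above: NaN -> 0, below range -> min,
// above range -> max, otherwise truncate toward zero.
template <typename Int, typename Float>
Int SatTruncate(Float src) {
  if (std::isnan(src)) return 0;                       // parity branch above
  Float rounded = std::trunc(src);                     // roundsd/roundss kRoundToZero
  const Float lower = static_cast<Float>(std::numeric_limits<Int>::min());
  const Float upper = std::ldexp(Float{1}, std::numeric_limits<Int>::digits);
  if (rounded < lower) return std::numeric_limits<Int>::min();   // src not positive
  if (rounded >= upper) return std::numeric_limits<Int>::max();  // src positive
  return static_cast<Int>(rounded);                    // round trip succeeded
}
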
+#undef __
} // namespace liftoff
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
@@ -1695,6 +1806,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertF64:
return liftoff::EmitTruncateFloatToInt<uint32_t, double>(this, dst.gp(),
src.fp(), trap);
+ case kExprI32SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, double>(
+ this, dst.gp(), src.fp());
case kExprI32ReinterpretF32:
Movd(dst.gp(), src.fp());
return true;
@@ -2017,8 +2140,164 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), shift);
}
}
+
+enum class ShiftSignedness { kSigned, kUnsigned };
+
+template <bool is_signed>
+void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ // Same algorithm is used for both signed and unsigned shifts, the only
+ // difference is the actual shift and pack in the end. This is the same
+ // algorithm as used in code-generator-ia32.cc
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+
+ // Unpack the bytes into words, do logical shifts, and repack.
+ assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
+ assm->Punpcklbw(dst.fp(), lhs.fp());
+ assm->mov(tmp, rhs.gp());
+ // Take shift value modulo 8.
+ assm->and_(tmp, 7);
+ assm->add(tmp, Immediate(8));
+ assm->Movd(tmp_simd, tmp);
+ if (is_signed) {
+ assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ tmp_simd);
+ assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
+ assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ tmp_simd);
+ assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
+ assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+}
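
SSE has no per-byte shift, so EmitI8x16Shr widens each byte into the high byte of a 16-bit lane (punpcklbw/punpckhbw against the source), shifts the 16-bit lanes right by (amount & 7) + 8, and re-packs with signed or unsigned saturation. Per lane this is equivalent to the following scalar sketch (illustrative only, not part of the patch):

#include <cstdint>

// One signed lane: place the byte in the high half of a word, shift the word
// arithmetically by (shift & 7) + 8; the low byte of the result is the shifted
// byte (packsswb cannot saturate because the value is already in range).
int8_t I8x16ShrSLane(int8_t lane, int32_t shift) {
  int widened = lane * 256;                    // punpck*bw: byte in bits 8..15
  // Arithmetic shift; implementation-defined pre-C++20, arithmetic on the
  // targets this code cares about.
  int shifted = widened >> ((shift & 7) + 8);  // psraw by (shift & 7) + 8
  return static_cast<int8_t>(shifted);
}

// The unsigned variant uses logical shifts and packuswb instead.
uint8_t I8x16ShrULane(uint8_t lane, int32_t shift) {
  unsigned widened = static_cast<unsigned>(lane) << 8;
  unsigned shifted = widened >> ((shift & 7) + 8);     // psrlw
  return static_cast<uint8_t>(shifted);
}
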
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ assm->xor_(tmp, tmp);
+ assm->mov(dst.gp(), Immediate(1));
+ assm->Ptest(src.fp(), src.fp());
+ assm->cmov(zero, dst.gp(), tmp);
+}
+
+template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Register tmp =
+ assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst)).gp();
+ XMMRegister tmp_simd = liftoff::kScratchDoubleReg;
+ assm->mov(tmp, Immediate(1));
+ assm->xor_(dst.gp(), dst.gp());
+ assm->Pxor(tmp_simd, tmp_simd);
+ (assm->*pcmp)(tmp_simd, src.fp());
+ assm->Ptest(tmp_simd, tmp_simd);
+ assm->cmov(zero, dst.gp(), tmp);
+}
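
EmitAnyTrue tests whether any bit of the vector is set (ptest plus cmov), while EmitAllTrue compares the vector lane-wise against zero and succeeds only if no lane was zero; the pcmp template parameter selects the lane width. A scalar sketch of the two predicates (illustrative only):

#include <cstddef>
#include <cstdint>

// v128.any_true: 1 if any byte (equivalently, any bit) of the vector is set.
int AnyTrue(const uint8_t lanes[16]) {
  for (size_t i = 0; i < 16; ++i) {
    if (lanes[i] != 0) return 1;
  }
  return 0;
}

// i8x16/i16x8/i32x4.all_true: 1 if every lane of the given width is nonzero;
// the pcmpeq{b,w,d} against zero above detects any all-zero lane.
template <typename Lane>
int AllTrue(const Lane* lanes, size_t lane_count) {
  for (size_t i = 0; i < lane_count; ++i) {
    if (lanes[i] == 0) return 0;
  }
  return 1;
}
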
+
} // namespace liftoff
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
+ Operand src_op{src_addr, offset_reg, times_1,
+ static_cast<int32_t>(offset_imm)};
+ *protected_load_pc = pc_offset();
+
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Pmovsxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint8()) {
+ Pmovzxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ Pmovsxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint16()) {
+ Pmovzxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ Pmovsxdq(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint32()) {
+ Pmovzxdq(dst.fp(), src_op);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src_op, 0);
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ } else if (memtype == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src_op, 0);
+ Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
+ Punpcklqdq(dst.fp(), dst.fp());
+ } else if (memtype == MachineType::Int32()) {
+ Vbroadcastss(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int64()) {
+ Movddup(dst.fp(), src_op);
+ }
+ }
+}
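
LoadTransform covers the two classes of SIMD memory ops: the extend forms load 64 bits and widen each lane to twice its width, sign- or zero-extended, and the splat forms load a single scalar and broadcast it to every lane. A scalar sketch of one example from each class (illustrative only, not V8 code):

#include <array>
#include <cstdint>
#include <cstring>

// load8x8_s (Pmovsxbw above): read 8 bytes and sign-extend each to 16 bits.
std::array<int16_t, 8> Load8x8S(const uint8_t* mem) {
  std::array<int16_t, 8> lanes{};
  for (int i = 0; i < 8; ++i) {
    int8_t byte;
    std::memcpy(&byte, mem + i, 1);
    lanes[i] = byte;                     // sign extension happens here
  }
  return lanes;
}

// load32_splat (Vbroadcastss above): read one 32-bit value, copy to all lanes.
std::array<uint32_t, 4> Load32Splat(const uint8_t* mem) {
  uint32_t value;
  std::memcpy(&value, mem, sizeof(value));
  return {value, value, value, value};
}
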
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ // Prepare 16 byte aligned buffer for shuffle control mask.
+ mov(tmp.gp(), esp);
+ and_(esp, -16);
+ movups(liftoff::kScratchDoubleReg, lhs.fp());
+
+ for (int i = 3; i >= 0; i--) {
+ uint32_t mask = 0;
+ for (int j = 3; j >= 0; j--) {
+ uint8_t lane = shuffle[i * 4 + j];
+ mask <<= 8;
+ mask |= lane < kSimd128Size ? lane : 0x80;
+ }
+ push(Immediate(mask));
+ }
+ Pshufb(liftoff::kScratchDoubleReg, Operand(esp, 0));
+
+ for (int i = 3; i >= 0; i--) {
+ uint32_t mask = 0;
+ for (int j = 3; j >= 0; j--) {
+ uint8_t lane = shuffle[i * 4 + j];
+ mask <<= 8;
+ mask |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
+ }
+ push(Immediate(mask));
+ }
+ if (dst.fp() != rhs.fp()) {
+ movups(dst.fp(), rhs.fp());
+ }
+ Pshufb(dst.fp(), Operand(esp, 0));
+ Por(dst.fp(), liftoff::kScratchDoubleReg);
+ mov(esp, tmp.gp());
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister mask =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ // Out-of-range indices should return 0, add 112 (0x70) so that any value > 15
+ // saturates to 128 (top bit set), so pshufb will zero that lane.
+ TurboAssembler::Move(mask, uint32_t{0x70707070});
+ Pshufd(mask, mask, uint8_t{0x0});
+ Paddusb(mask, rhs.fp());
+ if (lhs != dst) {
+ Movaps(dst.fp(), lhs.fp());
+ }
+ Pshufb(dst.fp(), mask);
+}
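
The swizzle relies on pshufb zeroing any lane whose index byte has its top bit set: adding 0x70 to every index with unsigned byte saturation (Paddusb) leaves indices 0 to 15 below 0x80 with their low nibble intact and pushes every index of 16 or more into 0x80 to 0xFF. Per lane this behaves like the following sketch (illustrative only):

#include <cstdint>

// One lane of i8x16.swizzle: out-of-range indices select 0.
uint8_t SwizzleLane(const uint8_t src[16], uint8_t index) {
  unsigned sum = index + 0x70u;                                  // paddusb with 0x70
  uint8_t adjusted = sum > 0xFF ? 0xFF : static_cast<uint8_t>(sum);
  if (adjusted & 0x80) return 0;    // pshufb zeroes lanes whose top bit is set
  return src[adjusted & 0x0F];      // pshufb uses only the low nibble
}
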
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -2350,6 +2629,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovmskb(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
@@ -2381,7 +2675,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -2399,6 +2693,43 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
Pand(dst.fp(), liftoff::kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
+ Punpcklbw(dst.fp(), lhs.fp());
+ uint8_t shift = (rhs & 7) + 8;
+ Psraw(liftoff::kScratchDoubleReg, shift);
+ Psraw(dst.fp(), shift);
+ Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = rhs & 7;
+ Psrlw(dst.fp(), lhs.fp(), byte{shift});
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ mov(tmp, mask);
+ Movd(liftoff::kScratchDoubleReg, tmp);
+ Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 0);
+ Pand(dst.fp(), liftoff::kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
@@ -2541,6 +2872,24 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = liftoff::kScratchDoubleReg;
+ Packsswb(tmp, src.fp());
+ Pmovmskb(dst.gp(), tmp);
+ shr(dst.gp(), 8);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
@@ -2553,6 +2902,32 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsraw, &Assembler::psraw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsraw, &Assembler::psraw, 4>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlw, &Assembler::psrlw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 4>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
@@ -2639,6 +3014,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskps(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
@@ -2651,6 +3041,32 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrad, &Assembler::psrad, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrad, &Assembler::psrad, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrld, &Assembler::psrld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrld, &Assembler::psrld, 5>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
@@ -2723,6 +3139,56 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister shift = liftoff::kScratchDoubleReg;
+ XMMRegister tmp =
+ GetUnusedRegister(RegClass::kFpReg, LiftoffRegList::ForRegs(dst, lhs))
+ .fp();
+
+ // Take shift value modulo 64.
+ and_(rhs.gp(), Immediate(63));
+ Movd(shift, rhs.gp());
+
+ // Set up a mask [0x80000000,0,0x80000000,0].
+ Pcmpeqb(tmp, tmp);
+ Psllq(tmp, tmp, 63);
+
+ Psrlq(tmp, tmp, shift);
+ Psrlq(dst.fp(), lhs.fp(), shift);
+ Pxor(dst.fp(), tmp);
+ Psubq(dst.fp(), tmp);
+}
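
SSE has no 64-bit arithmetic right shift, so the sequence emulates one: shift a lone sign bit and the value logically by the same amount, then xor with and subtract the shifted sign bit to restore sign extension. Scalar sketch of the identity used (illustrative only):

#include <cstdint>

// Arithmetic shift via logical shifts: with m = (1 << 63) >> n (logical),
// x >> n (arithmetic) == ((x >> n logical) ^ m) - m.
int64_t I64x2ShrSLane(int64_t value, int32_t shift) {
  uint32_t n = static_cast<uint32_t>(shift) & 63;         // shift modulo 64
  uint64_t mask = (uint64_t{1} << 63) >> n;               // pcmpeqb + psllq 63, psrlq n
  uint64_t logical = static_cast<uint64_t>(value) >> n;   // psrlq on the lane
  return static_cast<int64_t>((logical ^ mask) - mask);   // pxor, psubq
}
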
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ XMMRegister tmp = liftoff::kScratchDoubleReg;
+ int32_t shift = rhs & 63;
+
+ // Set up a mask [0x80000000,0,0x80000000,0].
+ Pcmpeqb(tmp, tmp);
+ Psllq(tmp, tmp, 63);
+
+ Psrlq(tmp, tmp, shift);
+ Psrlq(dst.fp(), lhs.fp(), shift);
+ Pxor(dst.fp(), tmp);
+ Psubq(dst.fp(), tmp);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlq, &Assembler::psrlq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2990,6 +3456,97 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
+ vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ movaps(liftoff::kScratchDoubleReg, src.fp());
+ cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ pand(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ // Set top bit if >= 0 (but not -0.0!).
+ Pxor(liftoff::kScratchDoubleReg, dst.fp());
+ // Convert to int.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Set top bit if >=0 is now < 0.
+ Pand(liftoff::kScratchDoubleReg, dst.fp());
+ Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+}
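
Cvttps2dq returns 0x80000000 for NaN and any out-of-range lane, so the code above masks NaN lanes to zero first and afterwards flips positive-overflow lanes to 0x7FFFFFFF, leaving negative overflow at INT32_MIN. The resulting per-lane semantics (wasm i32x4.trunc_sat_f32x4_s), as a scalar sketch (illustrative only):

#include <cmath>
#include <cstdint>
#include <limits>

// Scalar model of i32x4.trunc_sat_f32x4_s for one lane.
int32_t TruncSatF32ToI32(float lane) {
  if (std::isnan(lane)) return 0;                            // NaN -> 0
  if (lane <= -2147483648.0f) return std::numeric_limits<int32_t>::min();
  if (lane >= 2147483648.0f) return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(lane);                         // truncate toward zero
}
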
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ static constexpr RegClass tmp_rc = reg_class_for(ValueType::kS128);
+ DoubleRegister tmp =
+ GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(dst, src)).fp();
+ // NAN->0, negative->0.
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ maxps(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ // scratch: float representation of max_signed.
+ Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
+ uint8_t{1}); // 0x7fffffff
+ Cvtdq2ps(liftoff::kScratchDoubleReg,
+ liftoff::kScratchDoubleReg); // 0x4f000000
+ // tmp: convert (src-max_signed).
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ // Set negative lanes to 0.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubps(tmp, dst.fp(), liftoff::kScratchDoubleReg);
+ } else {
+ movaps(tmp, dst.fp());
+ subps(tmp, liftoff::kScratchDoubleReg);
+ }
+ Cmpleps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp);
+ Cvttps2dq(tmp, tmp);
+ Pxor(tmp, liftoff::kScratchDoubleReg);
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
+ Pmaxsd(tmp, liftoff::kScratchDoubleReg);
+ // Convert to int. Overflow lanes above max_signed will be 0x80000000.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Add (src-max_signed) for overflow lanes.
+ Paddd(dst.fp(), dst.fp(), tmp);
+}
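
With no unsigned cvttps2dq available, the sequence clamps NaN and negative lanes to zero, converts the part that fits the signed range directly, and converts (src - 2^31) separately to patch lanes at or above 2^31, saturating anything at or above 2^32 to 0xFFFFFFFF. The per-lane result (wasm i32x4.trunc_sat_f32x4_u), as a scalar sketch (illustrative only):

#include <cmath>
#include <cstdint>

// Scalar model of i32x4.trunc_sat_f32x4_u for one lane.
uint32_t TruncSatF32ToU32(float lane) {
  if (std::isnan(lane)) return 0;                        // NaN -> 0
  float rounded = std::trunc(lane);
  if (rounded < 0.0f) return 0;                          // negatives -> 0
  if (rounded >= 4294967296.0f) return 0xFFFFFFFFu;      // >= 2^32 saturates
  return static_cast<uint32_t>(rounded);                 // in range
}
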
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2ps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg); // Zeros.
+ Pblendw(liftoff::kScratchDoubleReg, src.fp(),
+ uint8_t{0x55}); // Get lo 16 bits.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsubd(dst.fp(), src.fp(), liftoff::kScratchDoubleReg); // Get hi 16 bits.
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ psubd(dst.fp(), liftoff::kScratchDoubleReg);
+ }
+ Cvtdq2ps(liftoff::kScratchDoubleReg,
+ liftoff::kScratchDoubleReg); // Convert lo exactly.
+ Psrld(dst.fp(), dst.fp(), byte{1}); // Divide by 2 to get in unsigned range.
+ Cvtdq2ps(dst.fp(), dst.fp()); // Convert hi, exactly.
+ Addps(dst.fp(), dst.fp(), dst.fp()); // Double hi, exactly.
+ Addps(dst.fp(), dst.fp(),
+ liftoff::kScratchDoubleReg); // Add hi and lo, may round.
+}
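
Cvtdq2ps only converts signed lanes, so each unsigned lane is split: the low 16 bits convert exactly, the high part is halved (so it fits the signed range and still converts exactly), converted, doubled, and added back; only the final add may round. Scalar sketch of the trick (illustrative only, not part of the patch):

#include <cstdint>

// Scalar model of the u32 -> f32 conversion built from signed converts above.
float U32ToF32ViaSignedConvert(uint32_t lane) {
  uint32_t lo = lane & 0xFFFFu;                  // pblendw: low 16 bits
  uint32_t hi = lane - lo;                       // psubd: high 16 bits
  float lo_f = static_cast<float>(static_cast<int32_t>(lo));       // exact
  float hi_f = static_cast<float>(static_cast<int32_t>(hi >> 1));  // psrld 1, exact
  hi_f = hi_f + hi_f;                            // addps dst,dst: double, exact
  return hi_f + lo_f;                            // the only step that may round
}
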
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3270,7 +3827,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index 923d375064c..a8b40a7b462 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -82,35 +82,35 @@ class StackTransferRecipe {
DCHECK(load_dst_regs_.is_empty());
}
- void TransferStackSlot(const VarState& dst, const VarState& src) {
+ V8_INLINE void TransferStackSlot(const VarState& dst, const VarState& src) {
DCHECK_EQ(dst.type(), src.type());
- switch (dst.loc()) {
+ if (dst.is_reg()) {
+ LoadIntoRegister(dst.reg(), src, src.offset());
+ return;
+ }
+ if (dst.is_const()) {
+ DCHECK_EQ(dst.i32_const(), src.i32_const());
+ return;
+ }
+ DCHECK(dst.is_stack());
+ switch (src.loc()) {
case VarState::kStack:
- switch (src.loc()) {
- case VarState::kStack:
- if (src.offset() == dst.offset()) break;
- asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
- break;
- case VarState::kRegister:
- asm_->Spill(dst.offset(), src.reg(), src.type());
- break;
- case VarState::kIntConst:
- asm_->Spill(dst.offset(), src.constant());
- break;
+ if (src.offset() != dst.offset()) {
+ asm_->MoveStackValue(dst.offset(), src.offset(), src.type());
}
break;
case VarState::kRegister:
- LoadIntoRegister(dst.reg(), src, src.offset());
+ asm_->Spill(dst.offset(), src.reg(), src.type());
break;
case VarState::kIntConst:
- DCHECK_EQ(dst, src);
+ asm_->Spill(dst.offset(), src.constant());
break;
}
}
- void LoadIntoRegister(LiftoffRegister dst,
- const LiftoffAssembler::VarState& src,
- uint32_t src_offset) {
+ V8_INLINE void LoadIntoRegister(LiftoffRegister dst,
+ const LiftoffAssembler::VarState& src,
+ uint32_t src_offset) {
switch (src.loc()) {
case VarState::kStack:
LoadStackSlot(dst, src_offset, src.type());
@@ -466,7 +466,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source,
// they do not move). Try to keep values in registers, but avoid duplicates.
InitMergeRegion(this, source_begin, target_begin, num_locals, kKeepStackSlots,
kConstantsNotAllowed, kNoReuseRegisters, used_regs);
- // Sanity check: All the {used_regs} are really in use now.
+ // Consistency check: All the {used_regs} are really in use now.
DCHECK_EQ(used_regs, used_registers & used_regs);
// Last, initialize the section in between. Here, constants are allowed, but
@@ -510,24 +510,15 @@ LiftoffAssembler::~LiftoffAssembler() {
LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot,
LiftoffRegList pinned) {
- switch (slot.loc()) {
- case VarState::kStack: {
- LiftoffRegister reg =
- GetUnusedRegister(reg_class_for(slot.type()), pinned);
- Fill(reg, slot.offset(), slot.type());
- return reg;
- }
- case VarState::kRegister:
- return slot.reg();
- case VarState::kIntConst: {
- RegClass rc =
- kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc, pinned);
- LoadConstant(reg, slot.constant());
- return reg;
- }
+ if (slot.is_reg()) return slot.reg();
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.type()), pinned);
+ if (slot.is_const()) {
+ LoadConstant(reg, slot.constant());
+ } else {
+ DCHECK(slot.is_stack());
+ Fill(reg, slot.offset(), slot.type());
}
- UNREACHABLE();
+ return reg;
}
LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
@@ -535,7 +526,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
if (slot.is_reg()) {
return half == kLowWord ? slot.reg().low() : slot.reg().high();
}
- LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ LiftoffRegister dst = GetUnusedRegister(kGpReg, {});
if (slot.is_stack()) {
FillI64Half(dst.gp(), slot.offset(), half);
return dst;
@@ -548,33 +539,39 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
return dst;
}
-LiftoffRegister LiftoffAssembler::PopToRegister(LiftoffRegList pinned) {
- DCHECK(!cache_state_.stack_state.empty());
- VarState slot = cache_state_.stack_state.back();
- if (slot.is_reg()) cache_state_.dec_used(slot.reg());
- cache_state_.stack_state.pop_back();
- return LoadToRegister(slot, pinned);
-}
-
LiftoffRegister LiftoffAssembler::PeekToRegister(int index,
LiftoffRegList pinned) {
DCHECK_LT(index, cache_state_.stack_state.size());
VarState& slot = cache_state_.stack_state.end()[-1 - index];
- if (slot.is_reg()) cache_state_.dec_used(slot.reg());
- LiftoffRegister reg = LoadToRegister(slot, pinned);
- if (!slot.is_reg()) {
- slot.MakeRegister(reg);
+ if (slot.is_reg()) {
+ cache_state_.dec_used(slot.reg());
+ return slot.reg();
}
+ LiftoffRegister reg = LoadToRegister(slot, pinned);
+ slot.MakeRegister(reg);
return reg;
}
void LiftoffAssembler::PrepareLoopArgs(int num) {
for (int i = 0; i < num; ++i) {
VarState& slot = cache_state_.stack_state.end()[-1 - i];
- if (!slot.is_const()) continue;
- RegClass rc =
- kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc);
+ if (slot.is_stack()) continue;
+ RegClass rc = reg_class_for(slot.type());
+ if (slot.is_reg()) {
+ if (cache_state_.get_use_count(slot.reg()) > 1) {
+ // If the register is used more than once, we cannot use it for the
+ // merge. Move it to an unused register instead.
+ LiftoffRegList pinned;
+ pinned.set(slot.reg());
+ LiftoffRegister dst_reg = GetUnusedRegister(rc, pinned);
+ Move(dst_reg, slot.reg(), slot.type());
+ cache_state_.dec_used(slot.reg());
+ cache_state_.inc_used(dst_reg);
+ slot.MakeRegister(dst_reg);
+ }
+ continue;
+ }
+ LiftoffRegister reg = GetUnusedRegister(rc, {});
LoadConstant(reg, slot.constant());
slot.MakeRegister(reg);
cache_state_.inc_used(reg);
@@ -724,6 +721,8 @@ void LiftoffAssembler::PrepareBuiltinCall(
PrepareStackTransfers(sig, call_descriptor, params.begin(), &stack_slots,
&stack_transfers, &param_regs);
// Create all the slots.
+ // Builtin stack parameters are pushed in reversed order.
+ stack_slots.Reverse();
stack_slots.Construct();
// Execute the stack transfers before filling the instance register.
stack_transfers.Execute();
@@ -742,13 +741,14 @@ void LiftoffAssembler::PrepareCall(const FunctionSig* sig,
constexpr size_t kInputShift = 1;
// Spill all cache slots which are not being used as parameters.
- // Don't update any register use counters, they will be reset later anyway.
- for (uint32_t idx = 0, end = cache_state_.stack_height() - num_params;
- idx < end; ++idx) {
- VarState& slot = cache_state_.stack_state[idx];
- if (!slot.is_reg()) continue;
- Spill(slot.offset(), slot.reg(), slot.type());
- slot.MakeStack();
+ for (VarState* it = cache_state_.stack_state.end() - 1 - num_params;
+ it >= cache_state_.stack_state.begin() &&
+ !cache_state_.used_registers.is_empty();
+ --it) {
+ if (!it->is_reg()) continue;
+ Spill(it->offset(), it->reg(), it->type());
+ cache_state_.dec_used(it->reg());
+ it->MakeStack();
}
LiftoffStackSlots stack_slots(this);
@@ -859,6 +859,10 @@ void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
// Use the {StackTransferRecipe} to move pairs, as the registers in the
// pairs might overlap.
StackTransferRecipe(this).MoveRegister(dst, src, type);
+ } else if (kNeedS128RegPair && dst.is_fp_pair()) {
+ // Calling low_fp is fine, Move will automatically check the type and
+ // convert this FP to its SIMD register, and use a SIMD move.
+ Move(dst.low_fp(), src.low_fp(), type);
} else if (dst.is_gp()) {
Move(dst.gp(), src.gp(), type);
} else {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 3377990496f..aad75b18597 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -56,20 +56,6 @@ class LiftoffAssembler : public TurboAssembler {
DCHECK(type_ == kWasmI32 || type_ == kWasmI64);
}
- bool operator==(const VarState& other) const {
- if (loc_ != other.loc_) return false;
- if (type_ != other.type_) return false;
- switch (loc_) {
- case kStack:
- return true;
- case kRegister:
- return reg_ == other.reg_;
- case kIntConst:
- return i32_const_ == other.i32_const_;
- }
- UNREACHABLE();
- }
-
bool is_stack() const { return loc_ == kStack; }
bool is_gp_reg() const { return loc_ == kRegister && reg_.is_gp(); }
bool is_fp_reg() const { return loc_ == kRegister && reg_.is_fp(); }
@@ -140,6 +126,8 @@ class LiftoffAssembler : public TurboAssembler {
CacheState() = default;
CacheState(CacheState&&) V8_NOEXCEPT = default;
CacheState& operator=(CacheState&&) V8_NOEXCEPT = default;
+ // Disallow copy construction.
+ CacheState(const CacheState&) = delete;
base::SmallVector<VarState, 8> stack_state;
LiftoffRegList used_registers;
@@ -277,14 +265,23 @@ class LiftoffAssembler : public TurboAssembler {
private:
// Make the copy assignment operator private (to be used from {Split()}).
CacheState& operator=(const CacheState&) V8_NOEXCEPT = default;
- // Disallow copy construction.
- CacheState(const CacheState&) = delete;
};
explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>);
~LiftoffAssembler() override;
- LiftoffRegister PopToRegister(LiftoffRegList pinned = {});
+ LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
+
+ LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) {
+ DCHECK(!cache_state_.stack_state.empty());
+ VarState slot = cache_state_.stack_state.back();
+ cache_state_.stack_state.pop_back();
+ if (slot.is_reg()) {
+ cache_state_.dec_used(slot.reg());
+ return slot.reg();
+ }
+ return LoadToRegister(slot, pinned);
+ }
// Returns the register which holds the value of stack slot {index}. If the
// value is not stored in a register yet, a register is allocated for it. The
@@ -340,7 +337,7 @@ class LiftoffAssembler : public TurboAssembler {
// possible.
LiftoffRegister GetUnusedRegister(
RegClass rc, std::initializer_list<LiftoffRegister> try_first,
- LiftoffRegList pinned = {}) {
+ LiftoffRegList pinned) {
for (LiftoffRegister reg : try_first) {
DCHECK_EQ(reg.reg_class(), rc);
if (cache_state_.is_free(reg)) return reg;
@@ -349,7 +346,7 @@ class LiftoffAssembler : public TurboAssembler {
}
// Get an unused register for class {rc}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
@@ -733,6 +730,15 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_f64_set_cond(Condition condition, Register dst,
DoubleRegister lhs, DoubleRegister rhs);
+ inline void LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type, LoadTransformationKind transform,
+ uint32_t* protected_load_pc);
+ inline void emit_s8x16_shuffle(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]);
+ inline void emit_s8x16_swizzle(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
inline void emit_i8x16_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_splat(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_splat(LiftoffRegister dst, LiftoffRegister src);
@@ -801,10 +807,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_s128_select(LiftoffRegister dst, LiftoffRegister src1,
LiftoffRegister src2, LiftoffRegister mask);
inline void emit_i8x16_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v8x16_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v8x16_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i8x16_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i8x16_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i8x16_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i8x16_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i8x16_add_saturate_s(LiftoffRegister dst,
@@ -832,10 +849,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i8x16_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v16x8_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v16x8_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i16x8_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i16x8_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i16x8_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i16x8_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i16x8_add_saturate_s(LiftoffRegister dst,
@@ -863,10 +891,21 @@ class LiftoffAssembler : public TurboAssembler {
inline void emit_i16x8_max_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_neg(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v32x4_anytrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_v32x4_alltrue(LiftoffRegister dst, LiftoffRegister src);
+ inline void emit_i32x4_bitmask(LiftoffRegister dst, LiftoffRegister src);
inline void emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i32x4_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i32x4_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i32x4_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -886,6 +925,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs);
+ inline void emit_i64x2_shr_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_shri_s(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
+ inline void emit_i64x2_shr_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs);
+ inline void emit_i64x2_shri_u(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs);
inline void emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
inline void emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
@@ -922,6 +969,14 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffRegister rhs);
inline void emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs);
+ inline void emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src);
+ inline void emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src);
inline void emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs);
@@ -1074,7 +1129,6 @@ class LiftoffAssembler : public TurboAssembler {
}
private:
- LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned);
LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half);
uint32_t num_locals_ = 0;
@@ -1090,8 +1144,8 @@ class LiftoffAssembler : public TurboAssembler {
LiftoffBailoutReason bailout_reason_ = kSuccess;
const char* bailout_detail_ = nullptr;
- LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
- LiftoffRegList pinned);
+ V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates,
+ LiftoffRegList pinned);
// Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned);
};
@@ -1212,19 +1266,19 @@ class LiftoffStackSlots {
}
void Add(const LiftoffAssembler::VarState& src) { slots_.emplace_back(src); }
+ void Reverse() { std::reverse(slots_.begin(), slots_.end()); }
+
inline void Construct();
private:
struct Slot {
- // Allow move construction.
- Slot(Slot&&) V8_NOEXCEPT = default;
Slot(const LiftoffAssembler::VarState& src, uint32_t src_offset,
RegPairHalf half)
: src_(src), src_offset_(src_offset), half_(half) {}
explicit Slot(const LiftoffAssembler::VarState& src)
: src_(src), half_(kLowWord) {}
- const LiftoffAssembler::VarState src_;
+ LiftoffAssembler::VarState src_;
uint32_t src_offset_ = 0;
RegPairHalf half_;
};
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index 4d0d9dbceca..d2beb398c15 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -8,6 +8,7 @@
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
@@ -26,7 +27,7 @@
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -280,13 +281,15 @@ class LiftoffCompiler {
// For debugging, we need to spill registers before a trap, to be able to
// inspect them.
- struct SpilledRegistersBeforeTrap {
+ struct SpilledRegistersBeforeTrap : public ZoneObject {
struct Entry {
int offset;
LiftoffRegister reg;
ValueType type;
};
- std::vector<Entry> entries;
+ ZoneVector<Entry> entries;
+
+ explicit SpilledRegistersBeforeTrap(Zone* zone) : entries(zone) {}
};
struct OutOfLineCode {
@@ -298,13 +301,13 @@ class LiftoffCompiler {
uint32_t pc; // for trap handler.
// These two pointers will only be used for debug code:
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
- std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers;
+ SpilledRegistersBeforeTrap* spilled_registers;
// Named constructors:
static OutOfLineCode Trap(
WasmCode::RuntimeStubId s, WasmCodePosition pos, uint32_t pc,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder,
- std::unique_ptr<SpilledRegistersBeforeTrap> spilled_registers) {
+ SpilledRegistersBeforeTrap* spilled_registers) {
DCHECK_LT(0, pos);
return {{},
{},
@@ -313,13 +316,13 @@ class LiftoffCompiler {
{},
pc,
debug_sidetable_entry_builder,
- std::move(spilled_registers)};
+ spilled_registers};
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {{}, {}, WasmCode::kWasmStackGuard, pos,
- regs, 0, debug_sidetable_entry_builder, {}};
+ regs, 0, debug_sidetable_entry_builder, nullptr};
}
};
@@ -335,6 +338,9 @@ class LiftoffCompiler {
env_(env),
debug_sidetable_builder_(debug_sidetable_builder),
for_debugging_(for_debugging),
+ out_of_line_code_(compilation_zone),
+ source_position_table_builder_(compilation_zone),
+ protected_instructions_(compilation_zone),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
@@ -391,12 +397,10 @@ class LiftoffCompiler {
switch (type.kind()) {
case ValueType::kS128:
return kSimd;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- return kAnyRef;
- case ValueType::kExnRef:
- return kExceptionHandling;
+ case ValueType::kOptRef:
+ case ValueType::kRef:
+ // TODO(7748): Refine this.
+ return kRefTypes;
case ValueType::kBottom:
return kMultiValue;
default:
@@ -418,7 +422,7 @@ class LiftoffCompiler {
}
LiftoffBailoutReason bailout_reason = BailoutReasonForType(type);
EmbeddedVector<char, 128> buffer;
- SNPrintF(buffer, "%s %s", type.type_name(), context);
+ SNPrintF(buffer, "%s %s", type.type_name().c_str(), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
@@ -495,7 +499,7 @@ class LiftoffCompiler {
position, __ cache_state()->used_registers,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
- Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@@ -519,6 +523,15 @@ class LiftoffCompiler {
return false;
}
+ void TraceFunctionEntry(FullDecoder* decoder) {
+ DEBUG_CODE_COMMENT("trace function entry");
+ __ SpillAllRegisters();
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ CallRuntimeStub(WasmCode::kWasmTraceEnter);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+ }
+
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, kSupportedTypes, __ local_type(i),
@@ -593,6 +606,8 @@ class LiftoffCompiler {
// is never a position of any instruction in the function.
StackCheck(0);
+ if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
+
// If we are generating debug code, do check the "hook on function call"
// flag. If set, trigger a break.
if (V8_UNLIKELY(for_debugging_)) {
@@ -604,7 +619,7 @@ class LiftoffCompiler {
*next_breakpoint_ptr_ == decoder->position());
if (!has_breakpoint) {
DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg).gp();
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
kSystemPointerSize);
Label no_break;
@@ -693,9 +708,10 @@ class LiftoffCompiler {
asm_.AbortCompilation();
}
- void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
+ V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
+ DCHECK(V8_UNLIKELY(for_debugging_));
bool breakpoint = false;
- if (V8_UNLIKELY(next_breakpoint_ptr_)) {
+ if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
@@ -720,6 +736,12 @@ class LiftoffCompiler {
}
// Potentially generate the source position to OSR to this instruction.
MaybeGenerateExtraSourcePos(decoder, !breakpoint);
+ }
+
+ void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
+ // Add a single check, so that the fast path can be inlined while
+ // {EmitDebuggingInfo} stays outlined.
+ if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
TraceCacheState(decoder);
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
@@ -923,10 +945,10 @@ class LiftoffCompiler {
constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {src}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <ValueType::Kind type>
@@ -936,9 +958,9 @@ class LiftoffCompiler {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
- ValueType sig_reps[] = {ValueType(type)};
+ ValueType sig_reps[] = {ValueType::Primitive(type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType(type), &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType::Primitive(type), &src, ext_ref);
};
EmitUnOp<type, type>(emit_with_c_fallback);
}
@@ -951,8 +973,9 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
- LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
- : __ GetUnusedRegister(dst_rc);
+ LiftoffRegister dst = src_rc == dst_rc
+ ? __ GetUnusedRegister(dst_rc, {src}, {})
+ : __ GetUnusedRegister(dst_rc, {});
DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
@@ -963,20 +986,22 @@ class LiftoffCompiler {
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
- ValueType sig_reps[] = {kWasmI32, ValueType(src_type)};
+ ValueType sig_reps[] = {kWasmI32, ValueType::Primitive(src_type)};
FunctionSig sig(1, 1, sig_reps);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
- GenerateCCall(dst_regs, &sig, ValueType(dst_type), &src, ext_ref);
+ GenerateCCall(dst_regs, &sig, ValueType::Primitive(dst_type), &src,
+ ext_ref);
__ emit_cond_jump(kEqual, trap, kWasmI32, ret_reg.gp());
} else {
- ValueType sig_reps[] = {ValueType(src_type)};
+ ValueType sig_reps[] = {ValueType::Primitive(src_type)};
FunctionSig sig(0, 1, sig_reps);
- GenerateCCall(&dst, &sig, ValueType(dst_type), &src, ext_ref);
+ GenerateCCall(&dst, &sig, ValueType::Primitive(dst_type), &src,
+ ext_ref);
}
}
- __ PushRegister(ValueType(dst_type), dst);
+ __ PushRegister(ValueType::Primitive(dst_type), dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
@@ -1088,14 +1113,22 @@ class LiftoffCompiler {
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
- case kExprI32SConvertSatF32:
- case kExprI32UConvertSatF32:
- case kExprI32SConvertSatF64:
- case kExprI32UConvertSatF64:
- case kExprI64SConvertSatF32:
- case kExprI64UConvertSatF32:
- case kExprI64SConvertSatF64:
- case kExprI64UConvertSatF64:
+ CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap)
+ CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32,
+ &ExternalReference::wasm_float32_to_int64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32,
+ &ExternalReference::wasm_float32_to_uint64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64,
+ &ExternalReference::wasm_float64_to_int64_sat,
+ kNoTrap)
+ CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64,
+ &ExternalReference::wasm_float64_to_uint64_sat,
+ kNoTrap)
return unsupported(decoder, kNonTrappingFloatToInt,
WasmOpcodes::OpcodeName(opcode));
default:
@@ -1122,11 +1155,11 @@ class LiftoffCompiler {
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fnImm, dst, lhs, imm);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
} else {
// The RHS was not an immediate.
EmitBinOp<src_type, result_type>(fn);
@@ -1141,13 +1174,13 @@ class LiftoffCompiler {
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1483,34 +1516,34 @@ class LiftoffCompiler {
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
void RefNull(FullDecoder* decoder, Value* result) {
- unsupported(decoder, kAnyRef, "ref_null");
+ unsupported(decoder, kRefTypes, "ref_null");
}
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
- unsupported(decoder, kAnyRef, "func");
+ unsupported(decoder, kRefTypes, "func");
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
- unsupported(decoder, kAnyRef, "ref.as_non_null");
+ unsupported(decoder, kRefTypes, "ref.as_non_null");
}
void Drop(FullDecoder* decoder, const Value& value) {
@@ -1520,7 +1553,44 @@ class LiftoffCompiler {
__ cache_state()->stack_state.pop_back();
}
+ void TraceFunctionExit(FullDecoder* decoder) {
+ DEBUG_CODE_COMMENT("trace function exit");
+ // Before making the runtime call, spill all cache registers.
+ __ SpillAllRegisters();
+ LiftoffRegList pinned;
+ // Get a register to hold the stack slot for the return value.
+ LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ __ AllocateStackSlot(info.gp(), sizeof(int64_t));
+
+ // Store the return value if there is exactly one. Multiple return values
+ // are not handled yet.
+ size_t num_returns = decoder->sig_->return_count();
+ if (num_returns == 1) {
+ ValueType return_type = decoder->sig_->GetReturn(0);
+ LiftoffRegister return_reg =
+ __ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
+ __ Store(info.gp(), no_reg, 0, return_reg,
+ StoreType::ForValueType(return_type), pinned);
+ }
+ // Put the parameter in its place.
+ WasmTraceExitDescriptor descriptor;
+ DCHECK_EQ(0, descriptor.GetStackParameterCount());
+ DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
+ Register param_reg = descriptor.GetRegisterParameter(0);
+ if (info.gp() != param_reg) {
+ __ Move(param_reg, info.gp(), LiftoffAssembler::kWasmIntPtr);
+ }
+
+ source_position_table_builder_.AddPosition(
+ __ pc_offset(), SourcePosition(decoder->position()), false);
+ __ CallRuntimeStub(WasmCode::kWasmTraceExit);
+ safepoint_table_builder_.DefineSafepoint(&asm_, Safepoint::kNoLazyDeopt);
+
+ __ DeallocateStackSlot(sizeof(int64_t));
+ }
+
void ReturnImpl(FullDecoder* decoder) {
+ if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
DEBUG_CODE_COMMENT("leave frame");
@@ -1546,7 +1616,7 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(imm.type);
- LiftoffRegister reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg);
break;
@@ -1570,7 +1640,7 @@ class LiftoffCompiler {
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
- LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
@@ -1607,9 +1677,19 @@ class LiftoffCompiler {
LocalSet(imm.index, true);
}
+ void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ // TODO(7748): Introduce typed functions bailout reason
+ unsupported(decoder, kGC, "let");
+ }
+
+ void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
+ // TODO(7748): Introduce typed functions bailout reason
+ unsupported(decoder, kGC, "let");
+ }
+
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
- Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
@@ -1652,12 +1732,12 @@ class LiftoffCompiler {
void TableGet(FullDecoder* decoder, const Value& index, Value* result,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kAnyRef, "table_get");
+ unsupported(decoder, kRefTypes, "table_get");
}
void TableSet(FullDecoder* decoder, const Value& index, const Value& value,
const TableIndexImmediate<validate>& imm) {
- unsupported(decoder, kAnyRef, "table_set");
+ unsupported(decoder, kRefTypes, "table_set");
}
void Unreachable(FullDecoder* decoder) {
@@ -1675,8 +1755,8 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
- LiftoffRegister dst =
- __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
+ LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
+ {true_value, false_value}, {});
__ PushRegister(type, dst);
// Now emit the actual code to move either {true_value} or {false_value}
@@ -1819,11 +1899,12 @@ class LiftoffCompiler {
__ cache_state()->Steal(c->else_state->state);
}
- std::unique_ptr<SpilledRegistersBeforeTrap> GetSpilledRegistersBeforeTrap() {
- if (V8_LIKELY(!for_debugging_)) return nullptr;
+ SpilledRegistersBeforeTrap* GetSpilledRegistersBeforeTrap() {
+ DCHECK(for_debugging_);
// If we are generating debugging code, we really need to spill all
// registers to make them inspectable when stopping at the trap.
- auto spilled = std::make_unique<SpilledRegistersBeforeTrap>();
+ auto* spilled =
+ new (compilation_zone_) SpilledRegistersBeforeTrap(compilation_zone_);
for (uint32_t i = 0, e = __ cache_state()->stack_height(); i < e; ++i) {
auto& slot = __ cache_state()->stack_state[i];
if (!slot.is_reg()) continue;
@@ -1840,7 +1921,8 @@ class LiftoffCompiler {
out_of_line_code_.push_back(OutOfLineCode::Trap(
stub, position, pc,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling),
- GetSpilledRegistersBeforeTrap()));
+ V8_UNLIKELY(for_debugging_) ? GetSpilledRegistersBeforeTrap()
+ : nullptr));
return out_of_line_code_.back().label.get();
}
@@ -1852,7 +1934,7 @@ class LiftoffCompiler {
uint32_t offset, Register index, LiftoffRegList pinned,
ForceCheck force_check) {
const bool statically_oob =
- !base::IsInBounds(offset, access_size, env_->max_memory_size);
+ !base::IsInBounds<uint64_t>(offset, access_size, env_->max_memory_size);
if (!force_check && !statically_oob &&
(!FLAG_wasm_bounds_checks || env_->use_trap_handler)) {
@@ -1868,10 +1950,7 @@ class LiftoffCompiler {
if (statically_oob) {
__ emit_jump(trap_label);
- Control* current_block = decoder->control_at(0);
- if (current_block->reachable()) {
- current_block->reachability = kSpecOnlyReachable;
- }
+ decoder->SetSucceedingCodeDynamicallyUnreachable();
return true;
}
@@ -2033,11 +2112,54 @@ class LiftoffCompiler {
offset, decoder->position());
}
}
+
void LoadTransform(FullDecoder* decoder, LoadType type,
LoadTransformationKind transform,
const MemoryAccessImmediate<validate>& imm,
const Value& index_val, Value* result) {
- unsupported(decoder, kSimd, "simd");
+ // LoadTransform requires SIMD support, so check for it here. If
+    // unsupported, bail out and let TurboFan lower the code.
+ if (!CheckSupportedType(decoder, kSupportedTypes, kWasmS128,
+ "LoadTransform")) {
+ return;
+ }
+
+ LiftoffRegList pinned;
+ Register index = pinned.set(__ PopToRegister()).gp();
+    // For load splats, LoadType is the size of the load; for load extends,
+    // LoadType is the size of the lane, and the access always loads 8 bytes.
+ uint32_t access_size =
+ transform == LoadTransformationKind::kExtend ? 8 : type.size();
+ if (BoundsCheckMem(decoder, access_size, imm.offset, index, pinned,
+ kDontForceCheck)) {
+ return;
+ }
+
+ uint32_t offset = imm.offset;
+ index = AddMemoryMasking(index, &offset, &pinned);
+ DEBUG_CODE_COMMENT("LoadTransform from memory");
+ Register addr = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+ LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
+ uint32_t protected_load_pc = 0;
+ __ LoadTransform(value, addr, index, offset, type, transform,
+ &protected_load_pc);
+
+ if (env_->use_trap_handler) {
+ AddOutOfLineTrap(decoder->position(),
+ WasmCode::kThrowWasmTrapMemOutOfBounds,
+ protected_load_pc);
+ }
+ __ PushRegister(ValueType::Primitive(kS128), value);
+
+ if (FLAG_trace_wasm_memory) {
+      // Load extends are special again: they always read a full 64-bit
+      // word, so trace them as such.
+ MachineRepresentation mem_rep =
+ transform == LoadTransformationKind::kExtend
+ ? MachineRepresentation::kWord64
+ : type.mem_type().representation();
+ TraceMemoryOperation(false, mem_rep, index, offset, decoder->position());
+ }
}
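The access size used for the bounds check in LoadTransform above follows a simple rule: load-extend operations always read a full 64-bit word and widen its lanes, while load-splat operations read exactly one lane. A minimal sketch of that rule, using a hypothetical helper name that is not part of the V8 sources:

  uint32_t AccessSizeFor(LoadTransformationKind transform, LoadType type) {
    // e.g. i32x4.load16x4_s reads 8 bytes; v8x16.load_splat reads 1 byte.
    return transform == LoadTransformationKind::kExtend ? 8 : type.size();
  }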
void StoreMem(FullDecoder* decoder, StoreType type,
@@ -2075,7 +2197,7 @@ class LiftoffCompiler {
}
void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
@@ -2184,7 +2306,7 @@ class LiftoffCompiler {
const CallIndirectImmediate<validate>& imm,
const Value args[], Value returns[]) {
if (imm.table_index != 0) {
- return unsupported(decoder, kAnyRef, "table index != 0");
+ return unsupported(decoder, kRefTypes, "table index != 0");
}
for (ValueType ret : imm.sig->returns()) {
if (!CheckSupportedType(decoder, kSupportedTypes, ret, "return")) {
@@ -2326,7 +2448,7 @@ class LiftoffCompiler {
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
- unsupported(decoder, kAnyRef, "br_on_null");
+ unsupported(decoder, kRefTypes, "br_on_null");
}
template <ValueType::Kind src_type, ValueType::Kind result_type,
@@ -2344,9 +2466,9 @@ class LiftoffCompiler {
src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src3},
LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc);
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <typename EmitFn, typename EmitFnImm>
@@ -2360,14 +2482,14 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});
CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
@@ -2380,6 +2502,8 @@ class LiftoffCompiler {
return unsupported(decoder, kSimd, "simd");
}
switch (opcode) {
+ case wasm::kExprS8x16Swizzle:
+ return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_s8x16_swizzle);
case wasm::kExprI8x16Splat:
return EmitUnOp<kI32, kS128>(&LiftoffAssembler::emit_i8x16_splat);
case wasm::kExprI16x8Splat:
@@ -2500,9 +2624,21 @@ class LiftoffCompiler {
return EmitTerOp<kS128, kS128>(&LiftoffAssembler::emit_s128_select);
case wasm::kExprI8x16Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_neg);
+ case wasm::kExprV8x16AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_anytrue);
+ case wasm::kExprV8x16AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v8x16_alltrue);
+ case wasm::kExprI8x16BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i8x16_bitmask);
case wasm::kExprI8x16Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shl,
&LiftoffAssembler::emit_i8x16_shli);
+ case wasm::kExprI8x16ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_s,
+ &LiftoffAssembler::emit_i8x16_shri_s);
+ case wasm::kExprI8x16ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i8x16_shr_u,
+ &LiftoffAssembler::emit_i8x16_shri_u);
case wasm::kExprI8x16Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_add);
case wasm::kExprI8x16AddSaturateS:
@@ -2531,9 +2667,21 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i8x16_max_u);
case wasm::kExprI16x8Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_neg);
+ case wasm::kExprV16x8AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_anytrue);
+ case wasm::kExprV16x8AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v16x8_alltrue);
+ case wasm::kExprI16x8BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i16x8_bitmask);
case wasm::kExprI16x8Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shl,
&LiftoffAssembler::emit_i16x8_shli);
+ case wasm::kExprI16x8ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_s,
+ &LiftoffAssembler::emit_i16x8_shri_s);
+ case wasm::kExprI16x8ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i16x8_shr_u,
+ &LiftoffAssembler::emit_i16x8_shri_u);
case wasm::kExprI16x8Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_add);
case wasm::kExprI16x8AddSaturateS:
@@ -2562,9 +2710,21 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i16x8_max_u);
case wasm::kExprI32x4Neg:
return EmitUnOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_neg);
+ case wasm::kExprV32x4AnyTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_anytrue);
+ case wasm::kExprV32x4AllTrue:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_v32x4_alltrue);
+ case wasm::kExprI32x4BitMask:
+ return EmitUnOp<kS128, kI32>(&LiftoffAssembler::emit_i32x4_bitmask);
case wasm::kExprI32x4Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shl,
&LiftoffAssembler::emit_i32x4_shli);
+ case wasm::kExprI32x4ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_s,
+ &LiftoffAssembler::emit_i32x4_shri_s);
+ case wasm::kExprI32x4ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i32x4_shr_u,
+ &LiftoffAssembler::emit_i32x4_shri_u);
case wasm::kExprI32x4Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i32x4_add);
case wasm::kExprI32x4Sub:
@@ -2584,6 +2744,12 @@ class LiftoffCompiler {
case wasm::kExprI64x2Shl:
return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shl,
&LiftoffAssembler::emit_i64x2_shli);
+ case wasm::kExprI64x2ShrS:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_s,
+ &LiftoffAssembler::emit_i64x2_shri_s);
+ case wasm::kExprI64x2ShrU:
+ return EmitSimdShiftOp(&LiftoffAssembler::emit_i64x2_shr_u,
+ &LiftoffAssembler::emit_i64x2_shri_u);
case wasm::kExprI64x2Add:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_i64x2_add);
case wasm::kExprI64x2Sub:
@@ -2626,6 +2792,18 @@ class LiftoffCompiler {
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_min);
case wasm::kExprF64x2Max:
return EmitBinOp<kS128, kS128>(&LiftoffAssembler::emit_f64x2_max);
+ case wasm::kExprI32x4SConvertF32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_sconvert_f32x4);
+ case wasm::kExprI32x4UConvertF32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_i32x4_uconvert_f32x4);
+ case wasm::kExprF32x4SConvertI32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f32x4_sconvert_i32x4);
+ case wasm::kExprF32x4UConvertI32x4:
+ return EmitUnOp<kS128, kS128>(
+ &LiftoffAssembler::emit_f32x4_uconvert_i32x4);
case wasm::kExprI8x16SConvertI16x8:
return EmitBinOp<kS128, kS128>(
&LiftoffAssembler::emit_i8x16_sconvert_i16x8);
@@ -2689,10 +2867,10 @@ class LiftoffCompiler {
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
- __ PushRegister(ValueType(result_type), dst);
+ __ PushRegister(ValueType::Primitive(result_type), dst);
}
template <ValueType::Kind src2_type, typename EmitFn>
@@ -2716,7 +2894,7 @@ class LiftoffCompiler {
(src2_rc == result_rc || pin_src2)
? __ GetUnusedRegister(result_rc, {src1},
LiftoffRegList::ForRegs(src2))
- : __ GetUnusedRegister(result_rc, {src1});
+ : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
}
@@ -2770,8 +2948,15 @@ class LiftoffCompiler {
const Simd8x16ShuffleImmediate<validate>& imm,
const Value& input0, const Value& input1,
Value* result) {
- unsupported(decoder, kSimd, "simd");
+ static constexpr RegClass result_rc = reg_class_for(ValueType::kS128);
+ LiftoffRegister rhs = __ PopToRegister();
+ LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {lhs, rhs}, {});
+
+ __ LiftoffAssembler::emit_s8x16_shuffle(dst, lhs, rhs, imm.shuffle);
+ __ PushRegister(kWasmS128, dst);
}
+
void Throw(FullDecoder* decoder, const ExceptionIndexImmediate<validate>&,
const Vector<Value>& args) {
unsupported(decoder, kExceptionHandling, "throw");
@@ -3369,17 +3554,17 @@ class LiftoffCompiler {
void TableGrow(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& value, const Value& delta, Value* result) {
- unsupported(decoder, kAnyRef, "table.grow");
+ unsupported(decoder, kRefTypes, "table.grow");
}
void TableSize(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
Value* result) {
- unsupported(decoder, kAnyRef, "table.size");
+ unsupported(decoder, kRefTypes, "table.size");
}
void TableFill(FullDecoder* decoder, const TableIndexImmediate<validate>& imm,
const Value& start, const Value& value, const Value& count) {
- unsupported(decoder, kAnyRef, "table.fill");
+ unsupported(decoder, kRefTypes, "table.fill");
}
void StructNew(FullDecoder* decoder,
@@ -3389,7 +3574,8 @@ class LiftoffCompiler {
unsupported(decoder, kGC, "struct.new");
}
void StructGet(FullDecoder* decoder, const Value& struct_obj,
- const FieldIndexImmediate<validate>& field, Value* result) {
+ const FieldIndexImmediate<validate>& field, bool is_signed,
+ Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "struct.get");
}
@@ -3408,7 +3594,7 @@ class LiftoffCompiler {
}
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
- Value* result) {
+ bool is_signed, Value* result) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "array.get");
}
@@ -3423,6 +3609,12 @@ class LiftoffCompiler {
unsupported(decoder, kGC, "array.len");
}
+ void RttCanon(FullDecoder* decoder, const TypeIndexImmediate<validate>& imm,
+ Value* result) {
+ // TODO(7748): Implement.
+ unsupported(decoder, kGC, "rtt.canon");
+ }
+
void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
// TODO(7748): Implement.
unsupported(decoder, kGC, "");
@@ -3484,9 +3676,9 @@ class LiftoffCompiler {
DebugSideTableBuilder* const debug_sidetable_builder_;
const ForDebugging for_debugging_;
LiftoffBailoutReason bailout_reason_ = kSuccess;
- std::vector<OutOfLineCode> out_of_line_code_;
+ ZoneVector<OutOfLineCode> out_of_line_code_;
SourcePositionTableBuilder source_position_table_builder_;
- std::vector<trap_handler::ProtectedInstructionData> protected_instructions_;
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
// Zone used to store information during compilation. The result will be
// stored independently, such that this zone can die together with the
// LiftoffCompiler after compilation.
@@ -3536,9 +3728,9 @@ WasmCompilationResult ExecuteLiftoffCompilation(
std::unique_ptr<DebugSideTable>* debug_sidetable,
Vector<int> extra_source_pos) {
int func_body_size = static_cast<int>(func_body.end - func_body.start);
- TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "ExecuteLiftoffCompilation", "func_index", func_index,
- "body_size", func_body_size);
+ TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileBaseline", "func_index", func_index, "body_size",
+ func_body_size);
Zone zone(allocator, "LiftoffCompilationZone");
auto call_descriptor = compiler::GetWasmCallDescriptor(&zone, func_body.sig);
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.h b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
index 434172c4cf1..bb2ddaf050c 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.h
@@ -38,7 +38,7 @@ enum LiftoffBailoutReason : int8_t {
kComplexOperation = 4,
// Unimplemented proposals:
kSimd = 5,
- kAnyRef = 6,
+ kRefTypes = 6,
kExceptionHandling = 7,
kMultiValue = 8,
kTailCall = 9,
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index f24c95008c9..0560a66dfe7 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
+ LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});
int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -1269,6 +1269,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
bailout(kUnsupportedArchitecture, "kExprI32UConvertF64");
return true;
}
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
case kExprI32ReinterpretF32:
mfc1(dst.gp(), src.fp());
return true;
@@ -1542,6 +1566,27 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "emit_s8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kSimd, "emit_i8x16_splat");
@@ -1739,6 +1784,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i8x16_neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_shl");
@@ -1749,6 +1809,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i8x16_add");
@@ -1817,6 +1899,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i16x8_neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_shl");
@@ -1827,6 +1924,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i16x8_add");
@@ -1895,6 +2014,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kSimd, "emit_i32x4_neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_shl");
@@ -1905,6 +2039,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i32x4_add");
@@ -1959,6 +2115,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "emit_i64x2_add");
@@ -2064,6 +2242,26 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "emit_f64x2_max");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2251,7 +2449,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 292f8032b8f..70946d3f6b5 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
break;
@@ -1177,6 +1177,30 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprF64ReinterpretI64:
dmtc1(src.gp(), dst.fp());
return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
default:
return false;
}
@@ -1297,6 +1321,26 @@ inline FPUCondition ConditionToConditionCmpFPU(Condition condition,
UNREACHABLE();
}
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ Label all_false;
+ assm->BranchMSA(&all_false, MSA_BRANCH_V, all_zero, src.fp().toW(),
+ USE_DELAY_SLOT);
+ assm->li(dst.gp(), 0l);
+ assm->li(dst.gp(), 1);
+ assm->bind(&all_false);
+}
+
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src, MSABranchDF msa_branch_df) {
+ Label all_true;
+ assm->BranchMSA(&all_true, msa_branch_df, all_not_zero, src.fp().toW(),
+ USE_DELAY_SLOT);
+ assm->li(dst.gp(), 1);
+ assm->li(dst.gp(), 0l);
+ assm->bind(&all_true);
+}
+
} // namespace liftoff
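The EmitAnyTrue/EmitAllTrue helpers above lean on the MIPS branch delay slot: the first li sits in the delay slot of the MSA branch and executes on both paths, while the second li only runs on fall-through, so the boolean result is produced without a second branch. The semantics they implement, written as a reference sketch rather than V8 code:

  // any_true: 1 if any byte of the 128-bit value is non-zero (lane-width
  // agnostic); all_true: 1 only if every lane of the given width is non-zero.
  int V8x16AnyTrueRef(const uint8_t v[16]) {
    for (int i = 0; i < 16; ++i) {
      if (v[i] != 0) return 1;
    }
    return 0;
  }
  int V8x16AllTrueRef(const uint8_t v[16]) {
    for (int i = 0; i < 16; ++i) {
      if (v[i] == 0) return 0;
    }
    return 1;
  }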
void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst,
@@ -1357,6 +1401,112 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bind(&cont);
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Daddu(scratch, src_addr, offset_reg);
+ MemOperand src_op = MemOperand(scratch, offset_imm);
+ MSARegister dst_msa = dst.fp().toW();
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+
+ if (transform == LoadTransformationKind::kExtend) {
+ Ld(scratch, src_op);
+ if (memtype == MachineType::Int8()) {
+ fill_d(dst_msa, scratch);
+ clti_s_b(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_b(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint8()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_b(dst_msa, kSimd128RegZero, dst_msa);
+ } else if (memtype == MachineType::Int16()) {
+ fill_d(dst_msa, scratch);
+ clti_s_h(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_h(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint16()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_h(dst_msa, kSimd128RegZero, dst_msa);
+ } else if (memtype == MachineType::Int32()) {
+ fill_d(dst_msa, scratch);
+ clti_s_w(kSimd128ScratchReg, dst_msa, 0);
+ ilvr_w(dst_msa, kSimd128ScratchReg, dst_msa);
+ } else if (memtype == MachineType::Uint32()) {
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ fill_d(dst_msa, scratch);
+ ilvr_w(dst_msa, kSimd128RegZero, dst_msa);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Lb(scratch, src_op);
+ fill_b(dst_msa, scratch);
+ } else if (memtype == MachineType::Int16()) {
+ Lh(scratch, src_op);
+ fill_h(dst_msa, scratch);
+ } else if (memtype == MachineType::Int32()) {
+ Lw(scratch, src_op);
+ fill_w(dst_msa, scratch);
+ } else if (memtype == MachineType::Int64()) {
+ Ld(scratch, src_op);
+ fill_d(dst_msa, scratch);
+ }
+ }
+}
+
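In the load-extend path above, the eight loaded bytes are placed in the low half of the destination with fill_d, a per-lane mask is built (a sign mask via clti_s_* for signed extends, the zeroed kSimd128RegZero for unsigned ones), and ilvr_* interleaves data and mask into widened lanes. The intended result for one of these ops, i32x4.load16x4_s, as a scalar sketch (reference only, not V8 code):

  void Load16x4SRef(const int16_t mem[4], int32_t out[4]) {
    // Each 16-bit value is sign-extended into a 32-bit lane.
    for (int i = 0; i < 4; ++i) out[i] = mem[i];
  }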
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+
+ uint64_t control_hi = 0;
+ uint64_t control_low = 0;
+ for (int i = 7; i >= 0; i--) {
+ control_hi <<= 8;
+ control_hi |= shuffle[i + 8];
+ control_low <<= 8;
+ control_low |= shuffle[i];
+ }
+
+ if (dst_msa == lhs_msa) {
+ move_v(kSimd128ScratchReg, lhs_msa);
+ lhs_msa = kSimd128ScratchReg;
+ } else if (dst_msa == rhs_msa) {
+ move_v(kSimd128ScratchReg, rhs_msa);
+ rhs_msa = kSimd128ScratchReg;
+ }
+
+ li(kScratchReg, control_low);
+ insert_d(dst_msa, 0, kScratchReg);
+ li(kScratchReg, control_hi);
+ insert_d(dst_msa, 1, kScratchReg);
+ vshf_b(dst_msa, rhs_msa, lhs_msa);
+}
+
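The loop above packs the sixteen shuffle indices little-endian into two 64-bit control words, so byte k of control_low is shuffle[k] and byte k of control_hi is shuffle[k + 8]; the words are then inserted into the destination register and consumed by vshf_b. An equivalent packing, written as a sketch rather than V8 code:

  uint64_t control_low = 0, control_hi = 0;
  for (int k = 0; k < 8; ++k) {
    control_low |= uint64_t{shuffle[k]} << (8 * k);
    control_hi |= uint64_t{shuffle[k + 8]} << (8 * k);
  }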
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ MSARegister dst_msa = dst.fp().toW();
+ MSARegister lhs_msa = lhs.fp().toW();
+ MSARegister rhs_msa = rhs.fp().toW();
+
+ if (dst == lhs) {
+ move_v(kSimd128ScratchReg, lhs_msa);
+ lhs_msa = kSimd128ScratchReg;
+ }
+ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ move_v(dst_msa, rhs_msa);
+ vshf_b(dst_msa, kSimd128RegZero, lhs_msa);
+}
+
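The swizzle above zeroes kSimd128RegZero and passes it as the second data operand of vshf_b, so out-of-range indices pick up zeros rather than data from a second input. Reference semantics of s8x16.swizzle, as a sketch (not V8 code):

  void S8x16SwizzleRef(const uint8_t lhs[16], const uint8_t rhs[16],
                       uint8_t dst[16]) {
    // Each output byte picks a byte of lhs by the index in rhs; indices
    // outside 0..15 yield zero.
    for (int i = 0; i < 16; ++i) {
      dst[i] = rhs[i] < 16 ? lhs[rhs[i]] : 0;
    }
  }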
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
fill_b(dst.fp().toW(), src.gp());
@@ -1567,6 +1717,32 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
subv_b(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_B);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_b(scratch0, src.fp().toW(), 7);
+ srli_h(scratch1, scratch0, 7);
+ or_v(scratch0, scratch0, scratch1);
+ srli_w(scratch1, scratch0, 14);
+ or_v(scratch0, scratch0, scratch1);
+ srli_d(scratch1, scratch0, 28);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ ilvev_b(scratch0, scratch1, scratch0);
+ copy_u_h(dst.gp(), scratch0, 0);
+}
+
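The shift-and-or cascade above funnels the most significant bit of each byte lane into the low 16 bits of a scalar register. The reference semantics of i8x16.bitmask, as a sketch (not V8 code):

  uint32_t I8x16BitMaskRef(const int8_t lanes[16]) {
    uint32_t mask = 0;
    for (int i = 0; i < 16; ++i) {
      if (lanes[i] < 0) mask |= 1u << i;  // bit i = sign bit of lane i
    }
    return mask;
  }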
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_b(kSimd128ScratchReg, rhs.gp());
@@ -1578,6 +1754,30 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_b(kSimd128ScratchReg, rhs.gp());
+ sra_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_b(kSimd128ScratchReg, rhs.gp());
+ srl_b(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_b(dst.fp().toW(), lhs.fp().toW(), rhs & 7);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_b(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1647,6 +1847,31 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
subv_h(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_H);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_h(scratch0, src.fp().toW(), 15);
+ srli_w(scratch1, scratch0, 15);
+ or_v(scratch0, scratch0, scratch1);
+ srli_d(scratch1, scratch0, 30);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ slli_d(scratch1, scratch1, 4);
+ or_v(scratch0, scratch0, scratch1);
+ copy_u_b(dst.gp(), scratch0, 0);
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_h(kSimd128ScratchReg, rhs.gp());
@@ -1658,6 +1883,30 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_h(kSimd128ScratchReg, rhs.gp());
+ sra_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_h(kSimd128ScratchReg, rhs.gp());
+ srl_h(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_h(dst.fp().toW(), lhs.fp().toW(), rhs & 15);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_h(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1727,6 +1976,29 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
subv_w(dst.fp().toW(), kSimd128RegZero, src.fp().toW());
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue(this, dst, src, MSA_BRANCH_W);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ MSARegister scratch0 = kSimd128RegZero;
+ MSARegister scratch1 = kSimd128ScratchReg;
+ srli_w(scratch0, src.fp().toW(), 31);
+ srli_d(scratch1, scratch0, 31);
+ or_v(scratch0, scratch0, scratch1);
+ shf_w(scratch1, scratch0, 0x0E);
+ slli_d(scratch1, scratch1, 2);
+ or_v(scratch0, scratch0, scratch1);
+ copy_u_b(dst.gp(), scratch0, 0);
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
fill_w(kSimd128ScratchReg, rhs.gp());
@@ -1738,6 +2010,30 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_w(kSimd128ScratchReg, rhs.gp());
+ sra_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_w(kSimd128ScratchReg, rhs.gp());
+ srl_w(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_w(dst.fp().toW(), lhs.fp().toW(), rhs & 31);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_w(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1794,6 +2090,30 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
slli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_d(kSimd128ScratchReg, rhs.gp());
+ sra_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srai_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ fill_d(kSimd128ScratchReg, rhs.gp());
+ srl_d(dst.fp().toW(), lhs.fp().toW(), kSimd128ScratchReg);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ srli_d(dst.fp().toW(), lhs.fp().toW(), rhs & 63);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
addv_d(dst.fp().toW(), lhs.fp().toW(), rhs.fp().toW());
@@ -1959,6 +2279,26 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
bsel_v(dst_msa, scratch0, scratch1);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ftrunc_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ftrunc_u_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ffint_s_w(dst.fp().toW(), src.fp().toW());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ ffint_u_w(dst.fp().toW(), src.fp().toW());
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2197,7 +2537,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}
void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index e02ab95ae4b..920dda4fe6c 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -539,6 +539,20 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "Load transform unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2splat");
@@ -698,6 +712,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -736,6 +772,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
@@ -746,6 +797,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -795,6 +868,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
@@ -805,6 +893,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
@@ -887,6 +997,13 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "s8x16_shuffle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -910,6 +1027,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
@@ -920,6 +1052,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
@@ -1117,6 +1271,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_s128select");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 704fcb81d74..803358c97e7 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -543,6 +543,20 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst,
bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
}
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "Load transform unimplemented");
+}
+
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kUnsupportedArchitecture, "emit_s8x16_swizzle");
+}
+
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_f64x2splat");
@@ -702,6 +716,28 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i64x2_shli");
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i64x2_shri_u");
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i64x2add");
@@ -740,6 +776,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i32x4neg");
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_anytrue");
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_bitmask");
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i32x4_shl");
@@ -750,6 +801,28 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i32x4_shli");
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i32x4_shri_u");
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i32x4add");
@@ -799,6 +872,21 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8neg");
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_anytrue");
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i16x8_bitmask");
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i16x8_shl");
@@ -809,6 +897,28 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i16x8_shli");
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i16x8_shri_u");
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i16x8add");
@@ -891,6 +1001,13 @@ void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i16x8extractlane_s");
}
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ bailout(kSimd, "s8x16_shuffle");
+}
+
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
bailout(kUnsupportedArchitecture, "emit_i8x16splat");
@@ -920,6 +1037,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i8x16neg");
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_anytrue");
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "v8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i8x16_bitmask");
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kSimd, "i8x16_shl");
@@ -930,6 +1062,28 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
bailout(kSimd, "i8x16_shli");
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "i8x16_shri_u");
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
bailout(kUnsupportedArchitecture, "emit_i8x16add");
@@ -1149,6 +1303,26 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_s128select");
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "f32x4_uconvert_i32x4");
+}
+
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index 7638c4f9cc0..83571a18f4c 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -8,7 +8,6 @@
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/codegen/assembler.h"
-#include "src/wasm/value-type.h"
namespace v8 {
namespace internal {
@@ -306,8 +305,6 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
case LoadType::kS128Load:
Movdqu(dst.fp(), src_op);
break;
- default:
- UNREACHABLE();
}
}
@@ -345,8 +342,6 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
case StoreType::kS128Store:
Movdqu(dst_op, src.fp());
break;
- default:
- UNREACHABLE();
}
}
@@ -1060,10 +1055,10 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
void (Assembler::*emit_shift)(Register)) {
// If dst is rcx, compute into the scratch register first, then move to rcx.
if (dst == rcx) {
- assm->Move(kScratchRegister, src, ValueType(type));
- if (amount != rcx) assm->Move(rcx, amount, ValueType(type));
+ assm->Move(kScratchRegister, src, ValueType::Primitive(type));
+ if (amount != rcx) assm->Move(rcx, amount, ValueType::Primitive(type));
(assm->*emit_shift)(kScratchRegister);
- assm->Move(rcx, kScratchRegister, ValueType(type));
+ assm->Move(rcx, kScratchRegister, ValueType::Primitive(type));
return;
}
@@ -1075,11 +1070,11 @@ inline void EmitShiftOperation(LiftoffAssembler* assm, Register dst,
src == rcx || assm->cache_state()->is_used(LiftoffRegister(rcx));
if (use_scratch) assm->movq(kScratchRegister, rcx);
if (src == rcx) src = kScratchRegister;
- assm->Move(rcx, amount, ValueType(type));
+ assm->Move(rcx, amount, ValueType::Primitive(type));
}
// Do the actual shift.
- if (dst != src) assm->Move(dst, src, ValueType(type));
+ if (dst != src) assm->Move(dst, src, ValueType::Primitive(type));
(assm->*emit_shift)(dst);
// Restore rcx if needed.
@@ -1620,6 +1615,7 @@ void LiftoffAssembler::emit_f64_sqrt(DoubleRegister dst, DoubleRegister src) {
}
namespace liftoff {
+#define __ assm->
// Used for float to int conversions. If the value in {converted_back} equals
// {src} afterwards, the conversion succeeded.
template <typename dst_type, typename src_type>
@@ -1628,29 +1624,29 @@ inline void ConvertFloatToIntAndBack(LiftoffAssembler* assm, Register dst,
DoubleRegister converted_back) {
if (std::is_same<double, src_type>::value) { // f64
if (std::is_same<int32_t, dst_type>::value) { // f64 -> i32
- assm->Cvttsd2si(dst, src);
- assm->Cvtlsi2sd(converted_back, dst);
+ __ Cvttsd2si(dst, src);
+ __ Cvtlsi2sd(converted_back, dst);
} else if (std::is_same<uint32_t, dst_type>::value) { // f64 -> u32
- assm->Cvttsd2siq(dst, src);
- assm->movl(dst, dst);
- assm->Cvtqsi2sd(converted_back, dst);
+ __ Cvttsd2siq(dst, src);
+ __ movl(dst, dst);
+ __ Cvtqsi2sd(converted_back, dst);
} else if (std::is_same<int64_t, dst_type>::value) { // f64 -> i64
- assm->Cvttsd2siq(dst, src);
- assm->Cvtqsi2sd(converted_back, dst);
+ __ Cvttsd2siq(dst, src);
+ __ Cvtqsi2sd(converted_back, dst);
} else {
UNREACHABLE();
}
} else { // f32
if (std::is_same<int32_t, dst_type>::value) { // f32 -> i32
- assm->Cvttss2si(dst, src);
- assm->Cvtlsi2ss(converted_back, dst);
+ __ Cvttss2si(dst, src);
+ __ Cvtlsi2ss(converted_back, dst);
} else if (std::is_same<uint32_t, dst_type>::value) { // f32 -> u32
- assm->Cvttss2siq(dst, src);
- assm->movl(dst, dst);
- assm->Cvtqsi2ss(converted_back, dst);
+ __ Cvttss2siq(dst, src);
+ __ movl(dst, dst);
+ __ Cvtqsi2ss(converted_back, dst);
} else if (std::is_same<int64_t, dst_type>::value) { // f32 -> i64
- assm->Cvttss2siq(dst, src);
- assm->Cvtqsi2ss(converted_back, dst);
+ __ Cvttss2siq(dst, src);
+ __ Cvtqsi2ss(converted_back, dst);
} else {
UNREACHABLE();
}
@@ -1661,7 +1657,7 @@ template <typename dst_type, typename src_type>
inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister src, Label* trap) {
if (!CpuFeatures::IsSupported(SSE4_1)) {
- assm->bailout(kMissingCPUFeature, "no SSE4.1");
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
return true;
}
CpuFeatureScope feature(assm, SSE4_1);
@@ -1670,24 +1666,143 @@ inline bool EmitTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
DoubleRegister converted_back = kScratchDoubleReg2;
if (std::is_same<double, src_type>::value) { // f64
- assm->Roundsd(rounded, src, kRoundToZero);
+ __ Roundsd(rounded, src, kRoundToZero);
} else { // f32
- assm->Roundss(rounded, src, kRoundToZero);
+ __ Roundss(rounded, src, kRoundToZero);
}
ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
converted_back);
if (std::is_same<double, src_type>::value) { // f64
- assm->Ucomisd(converted_back, rounded);
+ __ Ucomisd(converted_back, rounded);
} else { // f32
- assm->Ucomiss(converted_back, rounded);
+ __ Ucomiss(converted_back, rounded);
}
// Jump to trap if PF is 0 (one of the operands was NaN) or they are not
// equal.
- assm->j(parity_even, trap);
- assm->j(not_equal, trap);
+ __ j(parity_even, trap);
+ __ j(not_equal, trap);
+ return true;
+}
+
+template <typename dst_type, typename src_type>
+inline bool EmitSatTruncateFloatToInt(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
+ Label done;
+ Label not_nan;
+ Label src_positive;
+
+ DoubleRegister rounded = kScratchDoubleReg;
+ DoubleRegister converted_back = kScratchDoubleReg2;
+ DoubleRegister zero_reg = kScratchDoubleReg;
+
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Roundsd(rounded, src, kRoundToZero);
+ } else { // f32
+ __ Roundss(rounded, src, kRoundToZero);
+ }
+
+ ConvertFloatToIntAndBack<dst_type, src_type>(assm, dst, rounded,
+ converted_back);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(converted_back, rounded);
+ } else { // f32
+ __ Ucomiss(converted_back, rounded);
+ }
+
+ // Return 0 if PF is 1 (one of the operands was NaN)
+ __ j(parity_odd, &not_nan);
+ __ xorl(dst, dst);
+ __ jmp(&done);
+
+ __ bind(&not_nan);
+ // If rounding is as expected, return result
+ __ j(equal, &done);
+
+ __ xorpd(zero_reg, zero_reg);
+
+ // if out-of-bounds, check if src is positive
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(src, zero_reg);
+ } else { // f32
+ __ Ucomiss(src, zero_reg);
+ }
+ __ j(above, &src_positive);
+ if (std::is_same<int32_t, dst_type>::value ||
+ std::is_same<uint32_t, dst_type>::value) { // i32
+ __ movl(
+ dst,
+ Immediate(static_cast<int32_t>(std::numeric_limits<dst_type>::min())));
+ } else if (std::is_same<int64_t, dst_type>::value) { // i64s
+ __ movq(dst, Immediate64(std::numeric_limits<dst_type>::min()));
+ } else {
+ UNREACHABLE();
+ }
+ __ jmp(&done);
+
+ __ bind(&src_positive);
+ if (std::is_same<int32_t, dst_type>::value ||
+ std::is_same<uint32_t, dst_type>::value) { // i32
+ __ movl(
+ dst,
+ Immediate(static_cast<int32_t>(std::numeric_limits<dst_type>::max())));
+ } else if (std::is_same<int64_t, dst_type>::value) { // i64s
+ __ movq(dst, Immediate64(std::numeric_limits<dst_type>::max()));
+ } else {
+ UNREACHABLE();
+ }
+
+ __ bind(&done);
+ return true;
+}
+
+template <typename src_type>
+inline bool EmitSatTruncateFloatToUInt64(LiftoffAssembler* assm, Register dst,
+ DoubleRegister src) {
+ if (!CpuFeatures::IsSupported(SSE4_1)) {
+ __ bailout(kMissingCPUFeature, "no SSE4.1");
+ return true;
+ }
+ CpuFeatureScope feature(assm, SSE4_1);
+
+ Label done;
+ Label neg_or_nan;
+ Label overflow;
+
+ DoubleRegister zero_reg = kScratchDoubleReg;
+
+ __ xorpd(zero_reg, zero_reg);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Ucomisd(src, zero_reg);
+ } else { // f32
+ __ Ucomiss(src, zero_reg);
+ }
+ // Check if NaN
+ __ j(parity_even, &neg_or_nan);
+ __ j(below, &neg_or_nan);
+ if (std::is_same<double, src_type>::value) { // f64
+ __ Cvttsd2uiq(dst, src, &overflow);
+ } else { // f32
+ __ Cvttss2uiq(dst, src, &overflow);
+ }
+ __ jmp(&done);
+
+ __ bind(&neg_or_nan);
+ __ movq(dst, zero_reg);
+ __ jmp(&done);
+
+ __ bind(&overflow);
+ __ movq(dst, Immediate64(std::numeric_limits<uint64_t>::max()));
+ __ bind(&done);
return true;
}
+#undef __
} // namespace liftoff
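The two saturating helpers above back the *_sat conversion opcodes wired up in emit_type_conversion below. As a rough scalar model of the semantics they implement (a standalone sketch with a hypothetical name, not code from this patch): NaN becomes 0, and out-of-range inputs clamp to the destination type's limits instead of trapping.

#include <cmath>
#include <cstdint>
#include <limits>

// Scalar model of the saturating float->int semantics (illustrative only).
template <typename DstInt, typename SrcFloat>
DstInt SatTruncateReference(SrcFloat x) {
  if (std::isnan(x)) return DstInt{0};                    // NaN lane -> 0
  SrcFloat t = std::trunc(x);                             // round toward zero
  if (t <= static_cast<SrcFloat>(std::numeric_limits<DstInt>::min()))
    return std::numeric_limits<DstInt>::min();            // clamp low
  if (t >= static_cast<SrcFloat>(std::numeric_limits<DstInt>::max()))
    return std::numeric_limits<DstInt>::max();            // clamp high
  return static_cast<DstInt>(t);                          // in range: exact
}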
bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
@@ -1709,6 +1824,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
case kExprI32UConvertF64:
return liftoff::EmitTruncateFloatToInt<uint32_t, double>(this, dst.gp(),
src.fp(), trap);
+ case kExprI32SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI32SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int32_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI32UConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<uint32_t, double>(
+ this, dst.gp(), src.fp());
case kExprI32ReinterpretF32:
Movd(dst.gp(), src.fp());
return true;
@@ -1731,6 +1858,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
Cvttsd2uiq(dst.gp(), src.fp(), trap);
return true;
}
+ case kExprI64SConvertSatF32:
+ return liftoff::EmitSatTruncateFloatToInt<int64_t, float>(this, dst.gp(),
+ src.fp());
+ case kExprI64UConvertSatF32: {
+ return liftoff::EmitSatTruncateFloatToUInt64<float>(this, dst.gp(),
+ src.fp());
+ }
+ case kExprI64SConvertSatF64:
+ return liftoff::EmitSatTruncateFloatToInt<int64_t, double>(this, dst.gp(),
+ src.fp());
+ case kExprI64UConvertSatF64: {
+ return liftoff::EmitSatTruncateFloatToUInt64<double>(this, dst.gp(),
+ src.fp());
+ }
case kExprI64UConvertI32:
AssertZeroExtended(src.gp());
if (dst.gp() != src.gp()) movl(dst.gp(), src.gp());
@@ -1975,8 +2116,185 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
(assm->*sse_op)(dst.fp(), shift);
}
}
+
+template <bool is_signed>
+void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, LiftoffRegister rhs) {
+ // Same algorithm as the one in code-generator-x64.cc.
+ assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
+ assm->Punpcklbw(dst.fp(), lhs.fp());
+ // Prepare shift value
+ assm->movq(kScratchRegister, rhs.gp());
+ // Take shift value modulo 8.
+ assm->andq(kScratchRegister, Immediate(7));
+ assm->addq(kScratchRegister, Immediate(8));
+ assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
+ if (is_signed) {
+ assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
+ assm->Packsswb(dst.fp(), kScratchDoubleReg);
+ } else {
+ assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
+ assm->Packuswb(dst.fp(), kScratchDoubleReg);
+ }
+}
+
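SSE has no per-byte shift, so EmitI8x16Shr widens each byte into the high half of a 16-bit lane, shifts by (amount & 7) + 8, and packs the lanes back down. A per-lane scalar sketch of the signed case (hypothetical helper, not part of the patch; the unsigned variant uses a logical shift and Packuswb instead):

#include <cstdint>

// One lane of i8x16.shr_s via the widen/shift/narrow trick (illustrative).
int8_t I8ShrS_Lane(int8_t lane, int32_t amount) {
  int32_t widened = static_cast<int32_t>(lane) * 256;  // byte sits in the high half
  int32_t shifted = widened >> ((amount & 7) + 8);     // the extra +8 undoes the widening
  return static_cast<int8_t>(shifted);                 // narrowing is lossless here
}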
+// Can be used by both the immediate and register version of the shifts. psraq
+// is only available in AVX512, so we can't use it yet.
+template <typename ShiftOperand>
+void EmitI64x2ShrS(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister lhs, ShiftOperand rhs,
+ bool shift_is_rcx = false) {
+ bool restore_rcx = false;
+ Register backup = kScratchRegister2;
+ if (!shift_is_rcx) {
+ if (assm->cache_state()->is_used(LiftoffRegister(rcx))) {
+ restore_rcx = true;
+ assm->movq(backup, rcx);
+ }
+ assm->movl(rcx, rhs);
+ }
+
+ Register tmp = kScratchRegister;
+
+ assm->Pextrq(tmp, lhs.fp(), int8_t{0x0});
+ assm->sarq_cl(tmp);
+ assm->Pinsrq(dst.fp(), tmp, int8_t{0x0});
+
+ assm->Pextrq(tmp, lhs.fp(), int8_t{0x1});
+ assm->sarq_cl(tmp);
+ assm->Pinsrq(dst.fp(), tmp, int8_t{0x1});
+
+ // restore rcx.
+ if (restore_rcx) {
+ assm->movq(rcx, backup);
+ }
+}
+
+inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ assm->xorq(dst.gp(), dst.gp());
+ assm->Ptest(src.fp(), src.fp());
+ assm->setcc(not_equal, dst.gp());
+}
+
+template <void (TurboAssembler::*pcmp)(XMMRegister, XMMRegister)>
+inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = kScratchDoubleReg;
+ assm->xorq(dst.gp(), dst.gp());
+ assm->Pxor(tmp, tmp);
+ (assm->*pcmp)(tmp, src.fp());
+ assm->Ptest(tmp, tmp);
+ assm->setcc(equal, dst.gp());
+}
+
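EmitAnyTrue just tests whether the vector is nonzero; EmitAllTrue compares every lane against zero first, so the Ptest only sees a nonzero value when some lane was zero. A scalar sketch of the all-true reduction for 32-bit lanes (hypothetical helper, illustrative only):

#include <cstdint>

// Scalar model of v32x4.alltrue: pcmpeqd against zero marks the zero lanes,
// and ptest/setcc(equal) reports "all true" when no lane was marked.
bool AllTrue32_Reference(const uint32_t lanes[4]) {
  uint32_t zero_lane_mask = 0;
  for (int i = 0; i < 4; ++i) {
    zero_lane_mask |= (lanes[i] == 0) ? 0xFFFFFFFFu : 0u;  // Pcmpeqd step
  }
  return zero_lane_mask == 0;                              // Ptest sets ZF
}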
} // namespace liftoff
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uint32_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ if (emit_debug_code() && offset_reg != no_reg) {
+ AssertZeroExtended(offset_reg);
+ }
+ Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ *protected_load_pc = pc_offset();
+ MachineType memtype = type.mem_type();
+ if (transform == LoadTransformationKind::kExtend) {
+ if (memtype == MachineType::Int8()) {
+ Pmovsxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint8()) {
+ Pmovzxbw(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int16()) {
+ Pmovsxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint16()) {
+ Pmovzxwd(dst.fp(), src_op);
+ } else if (memtype == MachineType::Int32()) {
+ Pmovsxdq(dst.fp(), src_op);
+ } else if (memtype == MachineType::Uint32()) {
+ Pmovzxdq(dst.fp(), src_op);
+ }
+ } else {
+ DCHECK_EQ(LoadTransformationKind::kSplat, transform);
+ if (memtype == MachineType::Int8()) {
+ Pinsrb(dst.fp(), src_op, 0);
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pshufb(dst.fp(), kScratchDoubleReg);
+ } else if (memtype == MachineType::Int16()) {
+ Pinsrw(dst.fp(), src_op, 0);
+ Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
+ Punpcklqdq(dst.fp(), dst.fp());
+ } else if (memtype == MachineType::Int32()) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vbroadcastss(dst.fp(), src_op);
+ } else {
+ Movss(dst.fp(), src_op);
+ Shufps(dst.fp(), dst.fp(), byte{0});
+ }
+ } else if (memtype == MachineType::Int64()) {
+ Movddup(dst.fp(), src_op);
+ }
+ }
+}
+
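LoadTransform covers the two v128 load shapes added by the SIMD proposal: kExtend loads 64 bits and widens each half-size lane, kSplat loads one scalar and repeats it across the vector. A scalar sketch of one case of each (hypothetical helpers, illustrative only):

#include <array>
#include <cstdint>
#include <cstring>

// load8x8_s: read 8 bytes and sign-extend each one to 16 bits (Pmovsxbw path).
std::array<int16_t, 8> LoadExtend8x8S(const void* mem) {
  int8_t bytes[8];
  std::memcpy(bytes, mem, sizeof(bytes));
  std::array<int16_t, 8> lanes{};
  for (int i = 0; i < 8; ++i) lanes[i] = bytes[i];
  return lanes;
}

// load32_splat: read one 32-bit value and broadcast it (vbroadcastss/Shufps path).
std::array<int32_t, 4> LoadSplat32(const void* mem) {
  int32_t value;
  std::memcpy(&value, mem, sizeof(value));
  return {value, value, value, value};
}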
+void LiftoffAssembler::emit_s8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16]) {
+ LiftoffRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs, rhs));
+ Movups(kScratchDoubleReg, lhs.fp());
+
+ uint64_t mask1[2] = {};
+ for (int i = 15; i >= 0; i--) {
+ uint8_t lane = shuffle[i];
+ int j = i >> 3;
+ mask1[j] <<= 8;
+ mask1[j] |= lane < kSimd128Size ? lane : 0x80;
+ }
+ TurboAssembler::Move(tmp_simd.fp(), mask1[0]);
+ movq(kScratchRegister, mask1[1]);
+ Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+ Pshufb(kScratchDoubleReg, tmp_simd.fp());
+
+ uint64_t mask2[2] = {};
+ for (int i = 15; i >= 0; i--) {
+ uint8_t lane = shuffle[i];
+ int j = i >> 3;
+ mask2[j] <<= 8;
+ mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80;
+ }
+ TurboAssembler::Move(tmp_simd.fp(), mask2[0]);
+ movq(kScratchRegister, mask2[1]);
+ Pinsrq(tmp_simd.fp(), kScratchRegister, int8_t{1});
+
+ if (dst.fp() != rhs.fp()) {
+ Movups(dst.fp(), rhs.fp());
+ }
+ Pshufb(dst.fp(), tmp_simd.fp());
+ Por(dst.fp(), kScratchDoubleReg);
+}
+
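emit_s8x16_shuffle splits the 16-lane shuffle into two Pshufb operations: the first mask keeps only the indices that select from lhs and writes 0x80 (which Pshufb turns into zero) everywhere else, the second does the same for rhs with the index reduced modulo 16, and the two results are OR'd. The mask construction, written out as scalar loops (hypothetical helpers mirroring the mask1/mask2 loops above, illustrative only):

#include <array>
#include <cstdint>

// A control byte >= 0x80 makes PSHUFB write zero, so each mask keeps only the
// lanes that come from "its" input.
std::array<uint8_t, 16> LhsShuffleMask(const uint8_t shuffle[16]) {
  std::array<uint8_t, 16> mask{};
  for (int i = 0; i < 16; ++i) mask[i] = shuffle[i] < 16 ? shuffle[i] : 0x80;
  return mask;
}

std::array<uint8_t, 16> RhsShuffleMask(const uint8_t shuffle[16]) {
  std::array<uint8_t, 16> mask{};
  for (int i = 0; i < 16; ++i) mask[i] = shuffle[i] >= 16 ? (shuffle[i] & 0x0F) : 0x80;
  return mask;
}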
+void LiftoffAssembler::emit_s8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ XMMRegister mask =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(lhs, rhs)).fp();
+ // Out-of-range indices should return 0, add 112 (0x70) so that any value > 15
+ // saturates to 128 (top bit set), so pshufb will zero that lane.
+ TurboAssembler::Move(mask, uint32_t{0x70707070});
+ Pshufd(mask, mask, uint8_t{0x0});
+ Paddusb(mask, rhs.fp());
+ if (lhs != dst) {
+ Movaps(dst.fp(), lhs.fp());
+ }
+ Pshufb(dst.fp(), mask);
+}
+
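The swizzle lowering relies on Paddusb's unsigned saturation: adding 0x70 leaves valid indices 0..15 in the range 0x70..0x7F with their low nibble intact, while any larger index ends up with the top bit set (saturating at 0xFF), which Pshufb interprets as "zero this lane". Per control byte (hypothetical helper, illustrative only):

#include <cstdint>

// Scalar model of the Paddusb(mask, rhs) step used for i8x16.swizzle.
uint8_t SwizzleControlByte(uint8_t index) {
  uint32_t sum = static_cast<uint32_t>(index) + 0x70;          // bias by 112
  uint8_t control = sum > 0xFF ? 0xFF : static_cast<uint8_t>(sum);
  // control <  0x80 -> PSHUFB selects lane (control & 0x0F) == index
  // control >= 0x80 -> PSHUFB zeroes the lane (out-of-range index)
  return control;
}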
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
Movd(dst.fp(), src.gp());
@@ -2302,6 +2620,21 @@ void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v8x16_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pmovmskb(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
static constexpr RegClass tmp_simd_rc = reg_class_for(ValueType::kS128);
@@ -2347,6 +2680,48 @@ void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
Pand(dst.fp(), kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ Punpckhbw(kScratchDoubleReg, lhs.fp());
+ Punpcklbw(dst.fp(), lhs.fp());
+ uint8_t shift = (rhs & 7) + 8;
+ Psraw(kScratchDoubleReg, shift);
+ Psraw(dst.fp(), shift);
+ Packsswb(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = rhs & 7;
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsrlw(dst.fp(), lhs.fp(), byte{shift});
+ } else if (dst != lhs) {
+ Movaps(dst.fp(), lhs.fp());
+ psrlw(dst.fp(), byte{shift});
+ }
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ movl(kScratchRegister, Immediate(mask));
+ Movd(kScratchDoubleReg, kScratchRegister);
+ Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
+ Pand(dst.fp(), kScratchDoubleReg);
+}
+
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddb, &Assembler::paddb>(
@@ -2489,6 +2864,24 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v16x8_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ XMMRegister tmp = kScratchDoubleReg;
+ Packsswb(tmp, src.fp());
+ Pmovmskb(dst.gp(), tmp);
+ shrq(dst.gp(), Immediate(8));
+}
+
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpsllw, &Assembler::psllw, 4>(this, dst,
@@ -2501,6 +2894,32 @@ void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsraw, &Assembler::psraw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsraw, &Assembler::psraw, 4>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlw, &Assembler::psrlw, 4>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 4>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddw, &Assembler::paddw>(
@@ -2587,6 +3006,21 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
}
}
+void LiftoffAssembler::emit_v32x4_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAnyTrue(this, dst, src);
+}
+
+void LiftoffAssembler::emit_v32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src);
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Movmskps(dst.gp(), src.fp());
+}
+
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdShiftOp<&Assembler::vpslld, &Assembler::pslld, 5>(this, dst,
@@ -2599,6 +3033,32 @@ void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrad, &Assembler::psrad, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrad, &Assembler::psrad, 5>(
+ this, dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrld, &Assembler::psrld, 5>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrld, &Assembler::psrld, 5>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddd, &Assembler::paddd>(
@@ -2670,6 +3130,31 @@ void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
this, dst, lhs, rhs);
}
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitI64x2ShrS(this, dst, lhs, rhs.gp(),
+ /*shift_is_rcx=*/rhs.gp() == rcx);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitI64x2ShrS(this, dst, lhs, Immediate(rhs));
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ liftoff::EmitSimdShiftOp<&Assembler::vpsrlq, &Assembler::psrlq, 6>(this, dst,
+ lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
+ this, dst, lhs, rhs);
+}
+
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpaddq, &Assembler::paddq>(
@@ -2937,6 +3422,89 @@ void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
Andnpd(dst.fp(), kScratchDoubleReg);
}
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
+ vpand(dst.fp(), src.fp(), kScratchDoubleReg);
+ } else {
+ movaps(kScratchDoubleReg, src.fp());
+ cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ pand(dst.fp(), kScratchDoubleReg);
+ }
+ // Set top bit if >= 0 (but not -0.0!).
+ Pxor(kScratchDoubleReg, dst.fp());
+ // Convert to int.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Set top bit if >=0 is now < 0.
+ Pand(kScratchDoubleReg, dst.fp());
+ Psrad(kScratchDoubleReg, byte{31});
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ Pxor(dst.fp(), kScratchDoubleReg);
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ // NAN->0, negative->0.
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vmaxps(dst.fp(), src.fp(), kScratchDoubleReg);
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ maxps(dst.fp(), kScratchDoubleReg);
+ }
+ // scratch: float representation of max_signed.
+ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ Psrld(kScratchDoubleReg, uint8_t{1}); // 0x7fffffff
+ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // 0x4f000000
+ // scratch2: convert (src-max_signed).
+ // Set positive overflow lanes to 0x7FFFFFFF.
+ // Set negative lanes to 0.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vsubps(liftoff::kScratchDoubleReg2, dst.fp(), kScratchDoubleReg);
+ } else {
+ movaps(liftoff::kScratchDoubleReg2, dst.fp());
+ subps(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ }
+ Cmpleps(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
+ Cvttps2dq(liftoff::kScratchDoubleReg2, liftoff::kScratchDoubleReg2);
+ Pxor(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ Pxor(kScratchDoubleReg, kScratchDoubleReg);
+ Pmaxsd(liftoff::kScratchDoubleReg2, kScratchDoubleReg);
+ // Convert to int. Overflow lanes above max_signed will be 0x80000000.
+ Cvttps2dq(dst.fp(), dst.fp());
+ // Add (src-max_signed) for overflow lanes.
+ Paddd(dst.fp(), liftoff::kScratchDoubleReg2);
+}
+
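The sequence above is the usual SSE workaround for the missing unsigned cvttps2dq: clamp negatives and NaN to zero, convert the part below max_signed directly, and patch in the overflowing part separately. What each lane is meant to produce, as a scalar reference (hypothetical helper; the signed variant analogously maps NaN to 0 and clamps to the int32 range):

#include <cmath>
#include <cstdint>

// Per-lane semantics of i32x4.trunc_sat_f32x4_u (illustrative only).
uint32_t F32ToU32Sat_Lane(float x) {
  if (std::isnan(x) || x < 0.0f) return 0;       // NaN / negative -> 0
  if (x >= 4294967296.0f) return 0xFFFFFFFFu;    // >= 2^32 saturates
  return static_cast<uint32_t>(x);               // truncate toward zero
}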
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Cvtdq2ps(dst.fp(), src.fp());
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ Pxor(kScratchDoubleReg, kScratchDoubleReg); // Zeros.
+ Pblendw(kScratchDoubleReg, src.fp(), uint8_t{0x55}); // Get lo 16 bits.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpsubd(dst.fp(), src.fp(), kScratchDoubleReg); // Get hi 16 bits.
+ } else {
+ if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
+ psubd(dst.fp(), kScratchDoubleReg);
+ }
+ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // Convert lo exactly.
+ Psrld(dst.fp(), byte{1}); // Divide by 2 to get in unsigned range.
+ Cvtdq2ps(dst.fp(), dst.fp()); // Convert hi, exactly.
+ Addps(dst.fp(), dst.fp()); // Double hi, exactly.
+ Addps(dst.fp(), kScratchDoubleReg); // Add hi and lo, may round.
+}
+
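u32 to f32 cannot be done with a single Cvtdq2ps because the instruction treats lanes as signed, so the code splits each lane into its low 16 bits and the rest: both halves convert exactly, the high half is pre-halved to stay in signed range and doubled back afterwards, and the only rounding happens in the final add. A scalar model of one lane (hypothetical helper, illustrative only):

#include <cstdint>

// Scalar model of f32x4.convert_i32x4_u: split, convert both halves exactly,
// round once in the final addition.
float U32ToF32_Lane(uint32_t x) {
  uint32_t lo = x & 0xFFFFu;                 // Pblendw: keep the low 16 bits
  uint32_t hi = x - lo;                      // Psubd: the remaining high bits
  float f_hi = static_cast<float>(hi >> 1);  // halved so the vector form stays in signed range
  f_hi += f_hi;                              // doubling back is exact
  return f_hi + static_cast<float>(lo);      // the single rounding step
}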
void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc
index cd5d04bd2d8..aedf5726194 100644
--- a/chromium/v8/src/wasm/c-api.cc
+++ b/chromium/v8/src/wasm/c-api.cc
@@ -71,10 +71,19 @@ ValKind V8ValueTypeToWasm(i::wasm::ValueType v8_valtype) {
return F32;
case i::wasm::ValueType::kF64:
return F64;
- case i::wasm::ValueType::kFuncRef:
- return FUNCREF;
- case i::wasm::ValueType::kAnyRef:
- return ANYREF;
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (v8_valtype.heap_type()) {
+ case i::wasm::kHeapFunc:
+ return FUNCREF;
+ case i::wasm::kHeapExtern:
+ // TODO(7748): Rename this to EXTERNREF if/when third-party API
+ // changes.
+ return ANYREF;
+ default:
+ // TODO(wasm+): support new value types
+ UNREACHABLE();
+ }
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -94,7 +103,7 @@ i::wasm::ValueType WasmValKindToV8(ValKind kind) {
case FUNCREF:
return i::wasm::kWasmFuncRef;
case ANYREF:
- return i::wasm::kWasmAnyRef;
+ return i::wasm::kWasmExternRef;
default:
// TODO(wasm+): support new value types
UNREACHABLE();
@@ -201,8 +210,6 @@ auto seal(const typename implement<C>::type* x) -> const C* {
// Configuration
struct ConfigImpl {
- ConfigImpl() {}
- ~ConfigImpl() {}
};
template <>
@@ -249,7 +256,7 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config>&& config) -> own<Engine> {
i::FLAG_expose_gc = true;
- i::FLAG_experimental_wasm_anyref = true;
+ i::FLAG_experimental_wasm_reftypes = true;
i::FLAG_experimental_wasm_bigint = true;
i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
@@ -372,10 +379,10 @@ ValTypeImpl* valtype_i32 = new ValTypeImpl(I32);
ValTypeImpl* valtype_i64 = new ValTypeImpl(I64);
ValTypeImpl* valtype_f32 = new ValTypeImpl(F32);
ValTypeImpl* valtype_f64 = new ValTypeImpl(F64);
-ValTypeImpl* valtype_anyref = new ValTypeImpl(ANYREF);
+ValTypeImpl* valtype_externref = new ValTypeImpl(ANYREF);
ValTypeImpl* valtype_funcref = new ValTypeImpl(FUNCREF);
-ValType::~ValType() {}
+ValType::~ValType() = default;
void ValType::operator delete(void*) {}
@@ -395,7 +402,7 @@ own<ValType> ValType::make(ValKind k) {
valtype = valtype_f64;
break;
case ANYREF:
- valtype = valtype_anyref;
+ valtype = valtype_externref;
break;
case FUNCREF:
valtype = valtype_funcref;
@@ -417,7 +424,7 @@ struct ExternTypeImpl {
ExternKind kind;
explicit ExternTypeImpl(ExternKind kind) : kind(kind) {}
- virtual ~ExternTypeImpl() {}
+ virtual ~ExternTypeImpl() = default;
};
template <>
@@ -455,8 +462,6 @@ struct FuncTypeImpl : ExternTypeImpl {
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
-
- ~FuncTypeImpl() {}
};
template <>
@@ -464,7 +469,7 @@ struct implement<FuncType> {
using type = FuncTypeImpl;
};
-FuncType::~FuncType() {}
+FuncType::~FuncType() = default;
auto FuncType::make(ownvec<ValType>&& params, ownvec<ValType>&& results)
-> own<FuncType> {
@@ -510,7 +515,7 @@ struct GlobalTypeImpl : ExternTypeImpl {
content(std::move(content)),
mutability(mutability) {}
- ~GlobalTypeImpl() {}
+ ~GlobalTypeImpl() override = default;
};
template <>
@@ -518,7 +523,7 @@ struct implement<GlobalType> {
using type = GlobalTypeImpl;
};
-GlobalType::~GlobalType() {}
+GlobalType::~GlobalType() = default;
auto GlobalType::make(own<ValType>&& content, Mutability mutability)
-> own<GlobalType> {
@@ -563,7 +568,7 @@ struct TableTypeImpl : ExternTypeImpl {
element(std::move(element)),
limits(limits) {}
- ~TableTypeImpl() {}
+ ~TableTypeImpl() override = default;
};
template <>
@@ -571,7 +576,7 @@ struct implement<TableType> {
using type = TableTypeImpl;
};
-TableType::~TableType() {}
+TableType::~TableType() = default;
auto TableType::make(own<ValType>&& element, Limits limits) -> own<TableType> {
return element ? own<TableType>(seal<TableType>(
@@ -609,7 +614,7 @@ struct MemoryTypeImpl : ExternTypeImpl {
explicit MemoryTypeImpl(Limits limits)
: ExternTypeImpl(EXTERN_MEMORY), limits(limits) {}
- ~MemoryTypeImpl() {}
+ ~MemoryTypeImpl() override = default;
};
template <>
@@ -617,7 +622,7 @@ struct implement<MemoryType> {
using type = MemoryTypeImpl;
};
-MemoryType::~MemoryType() {}
+MemoryType::~MemoryType() = default;
auto MemoryType::make(Limits limits) -> own<MemoryType> {
return own<MemoryType>(
@@ -655,8 +660,6 @@ struct ImportTypeImpl {
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
-
- ~ImportTypeImpl() {}
};
template <>
@@ -697,8 +700,6 @@ struct ExportTypeImpl {
ExportTypeImpl(Name& name, // NOLINT(runtime/references)
own<ExternType>& type) // NOLINT(runtime/references)
: name(std::move(name)), type(std::move(type)) {}
-
- ~ExportTypeImpl() {}
};
template <>
@@ -767,7 +768,7 @@ class RefImpl {
}
private:
- RefImpl() {}
+ RefImpl() = default;
i::Address* location() const {
return reinterpret_cast<i::Address*>(val_.address());
@@ -813,8 +814,6 @@ struct FrameImpl {
func_offset(func_offset),
module_offset(module_offset) {}
- ~FrameImpl() {}
-
own<Instance> instance;
uint32_t func_index;
size_t func_offset;
@@ -854,7 +853,7 @@ struct implement<Trap> {
using type = RefImpl<Trap, i::JSReceiver>;
};
-Trap::~Trap() {}
+Trap::~Trap() = default;
auto Trap::copy() const -> own<Trap> { return impl(this)->copy(); }
@@ -941,7 +940,7 @@ struct implement<Foreign> {
using type = RefImpl<Foreign, i::JSReceiver>;
};
-Foreign::~Foreign() {}
+Foreign::~Foreign() = default;
auto Foreign::copy() const -> own<Foreign> { return impl(this)->copy(); }
@@ -962,7 +961,7 @@ struct implement<Module> {
using type = RefImpl<Module, i::WasmModuleObject>;
};
-Module::~Module() {}
+Module::~Module() = default;
auto Module::copy() const -> own<Module> { return impl(this)->copy(); }
@@ -1106,7 +1105,7 @@ struct implement<Extern> {
using type = RefImpl<Extern, i::JSReceiver>;
};
-Extern::~Extern() {}
+Extern::~Extern() = default;
auto Extern::copy() const -> own<Extern> { return impl(this)->copy(); }
@@ -1177,7 +1176,7 @@ struct implement<Func> {
using type = RefImpl<Func, i::JSFunction>;
};
-Func::~Func() {}
+Func::~Func() = default;
auto Func::copy() const -> own<Func> { return impl(this)->copy(); }
@@ -1384,7 +1383,7 @@ void PrepareFunctionData(i::Isolate* isolate,
if (!function_data->c_wrapper_code().IsSmi()) return;
// Compile wrapper code.
i::Handle<i::Code> wrapper_code =
- i::compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
+ i::compiler::CompileCWasmEntry(isolate, sig);
function_data->set_c_wrapper_code(*wrapper_code);
// Compute packed args size.
function_data->set_packed_args_size(
@@ -1414,16 +1413,13 @@ void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
case i::wasm::ValueType::kF64:
packer->Push(args[i].f64());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef:
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ // TODO(7748): Make sure this works for all types.
packer->Push(WasmRefToV8(store->i_isolate(), args[i].ref())->ptr());
break;
- case i::wasm::ValueType::kExnRef:
- // TODO(jkummerow): Implement these.
- UNIMPLEMENTED();
- break;
default:
+ // TODO(7748): Implement these.
UNIMPLEMENTED();
}
}
@@ -1447,20 +1443,23 @@ void PopArgs(const i::wasm::FunctionSig* sig, Val results[],
case i::wasm::ValueType::kF64:
results[i] = Val(packer->Pop<double>());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef: {
- i::Address raw = packer->Pop<i::Address>();
- i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
- DCHECK_IMPLIES(type == i::wasm::kWasmNullRef, obj->IsNull());
- results[i] = Val(V8RefValueToWasm(store, obj));
- break;
- }
- case i::wasm::ValueType::kExnRef:
- // TODO(jkummerow): Implement these.
- UNIMPLEMENTED();
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (type.heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc: {
+ i::Address raw = packer->Pop<i::Address>();
+ i::Handle<i::Object> obj(i::Object(raw), store->i_isolate());
+ results[i] = Val(V8RefValueToWasm(store, obj));
+ break;
+ }
+ default:
+ // TODO(jkummerow): Implement these.
+ UNIMPLEMENTED();
+ }
break;
default:
+ // TODO(7748): Implement these.
UNIMPLEMENTED();
}
}
@@ -1662,7 +1661,7 @@ struct implement<Global> {
using type = RefImpl<Global, i::WasmGlobalObject>;
};
-Global::~Global() {}
+Global::~Global() = default;
auto Global::copy() const -> own<Global> { return impl(this)->copy(); }
@@ -1707,14 +1706,21 @@ auto Global::get() const -> Val {
return Val(v8_global->GetF32());
case i::wasm::ValueType::kF64:
return Val(v8_global->GetF64());
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef: {
- StoreImpl* store = impl(this)->store();
- i::HandleScope scope(store->i_isolate());
- return Val(V8RefValueToWasm(store, v8_global->GetRef()));
- }
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef:
+ switch (v8_global->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc: {
+ StoreImpl* store = impl(this)->store();
+ i::HandleScope scope(store->i_isolate());
+ return Val(V8RefValueToWasm(store, v8_global->GetRef()));
+ }
+ default:
+ // TODO(wasm+): Support new value types.
+ UNREACHABLE();
+ }
default:
- // TODO(wasm+): support new value types
+ // TODO(7748): Implement these.
UNREACHABLE();
}
}
@@ -1731,7 +1737,7 @@ void Global::set(const Val& val) {
case F64:
return v8_global->SetF64(val.f64());
case ANYREF:
- return v8_global->SetAnyRef(
+ return v8_global->SetExternRef(
WasmRefToV8(impl(this)->store()->i_isolate(), val.ref()));
case FUNCREF: {
i::Isolate* isolate = impl(this)->store()->i_isolate();
@@ -1754,7 +1760,7 @@ struct implement<Table> {
using type = RefImpl<Table, i::WasmTableObject>;
};
-Table::~Table() {}
+Table::~Table() = default;
auto Table::copy() const -> own<Table> { return impl(this)->copy(); }
@@ -1772,8 +1778,8 @@ auto Table::make(Store* store_abs, const TableType* type, const Ref* ref)
break;
case ANYREF:
// See Engine::make().
- DCHECK(i::wasm::WasmFeatures::FromFlags().has_anyref());
- i_type = i::wasm::kWasmAnyRef;
+ DCHECK(i::wasm::WasmFeatures::FromFlags().has_reftypes());
+ i_type = i::wasm::kWasmExternRef;
break;
default:
UNREACHABLE();
@@ -1815,11 +1821,11 @@ auto Table::type() const -> own<TableType> {
uint32_t max;
if (!table->maximum_length().ToUint32(&max)) max = 0xFFFFFFFFu;
ValKind kind;
- switch (table->type().kind()) {
- case i::wasm::ValueType::kFuncRef:
+ switch (table->type().heap_type()) {
+ case i::wasm::kHeapFunc:
kind = FUNCREF;
break;
- case i::wasm::ValueType::kAnyRef:
+ case i::wasm::kHeapExtern:
kind = ANYREF;
break;
default:
@@ -1873,7 +1879,7 @@ struct implement<Memory> {
using type = RefImpl<Memory, i::WasmMemoryObject>;
};
-Memory::~Memory() {}
+Memory::~Memory() = default;
auto Memory::copy() const -> own<Memory> { return impl(this)->copy(); }
@@ -1941,7 +1947,7 @@ struct implement<Instance> {
using type = RefImpl<Instance, i::WasmInstanceObject>;
};
-Instance::~Instance() {}
+Instance::~Instance() = default;
auto Instance::copy() const -> own<Instance> { return impl(this)->copy(); }
diff --git a/chromium/v8/src/wasm/c-api.h b/chromium/v8/src/wasm/c-api.h
index 43a0fb73b2d..426806f1d20 100644
--- a/chromium/v8/src/wasm/c-api.h
+++ b/chromium/v8/src/wasm/c-api.h
@@ -43,7 +43,7 @@ class StoreImpl {
private:
friend own<Store> Store::make(Engine*);
- StoreImpl() {}
+ StoreImpl() = default;
v8::Isolate::CreateParams create_params_;
v8::Isolate* isolate_ = nullptr;
diff --git a/chromium/v8/src/wasm/decoder.h b/chromium/v8/src/wasm/decoder.h
index 695960086e1..8e6afe67f0a 100644
--- a/chromium/v8/src/wasm/decoder.h
+++ b/chromium/v8/src/wasm/decoder.h
@@ -137,7 +137,6 @@ class Decoder {
if (length == nullptr) {
length = &unused_length;
}
- DCHECK(WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(*pc)));
uint32_t index;
if (*pc == WasmOpcode::kSimdPrefix) {
// SIMD opcodes can be multiple bytes (when LEB128 encoded).
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index 48b804a3a92..d038a7c8d52 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -18,6 +18,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
@@ -42,7 +43,7 @@ struct WasmException;
}())
#define CHECK_PROTOTYPE_OPCODE_GEN(feat, opt_break) \
- DCHECK(!this->module_ || this->module_->origin == kWasmOrigin); \
+ DCHECK(this->module_->origin == kWasmOrigin); \
if (!this->enabled_.has_##feat()) { \
this->error("Invalid opcode (enable with --experimental-wasm-" #feat ")"); \
opt_break \
@@ -128,6 +129,138 @@ struct WasmException;
V(I64AtomicStore16U, Uint16) \
V(I64AtomicStore32U, Uint32)
+namespace value_type_reader {
+
+// Read a value type starting at address 'pc' in 'decoder'.
+// No bytes are consumed. The number of bytes read is written to {length}.
+// Returns the decoded value type, or kWasmBottom if decoding failed.
+// Registers an error if the type opcode is invalid iff validate is set.
+template <Decoder::ValidateFlag validate>
+ValueType read_value_type(Decoder* decoder, const byte* pc,
+ uint32_t* const length, const WasmFeatures& enabled) {
+ *length = 1;
+ byte val = decoder->read_u8<validate>(pc, "value type opcode");
+ if (decoder->failed()) {
+ return kWasmBottom;
+ }
+
+ ValueTypeCode code = static_cast<ValueTypeCode>(val);
+
+#define REF_TYPE_CASE(heap_type, nullable, feature) \
+ case kLocal##heap_type##Ref: { \
+ ValueType result = ValueType::Ref(kHeap##heap_type, nullable); \
+ if (enabled.has_##feature()) { \
+ return result; \
+ } \
+ decoder->errorf( \
+ pc, "invalid value type '%s', enable with --experimental-wasm-%s", \
+ result.type_name().c_str(), #feature); \
+ return kWasmBottom; \
+ }
+
+ switch (code) {
+ REF_TYPE_CASE(Func, kNullable, reftypes)
+ REF_TYPE_CASE(Extern, kNullable, reftypes)
+ REF_TYPE_CASE(Eq, kNullable, gc)
+ REF_TYPE_CASE(Exn, kNullable, eh)
+ case kLocalI32:
+ return kWasmI32;
+ case kLocalI64:
+ return kWasmI64;
+ case kLocalF32:
+ return kWasmF32;
+ case kLocalF64:
+ return kWasmF64;
+ case kLocalRef:
+ case kLocalOptRef: {
+ // Set length for the macro-defined cases:
+ *length += 1;
+ Nullability nullability = code == kLocalOptRef ? kNullable : kNonNullable;
+ uint8_t heap_index = decoder->read_u8<validate>(pc + 1, "heap type");
+ switch (static_cast<ValueTypeCode>(heap_index)) {
+ REF_TYPE_CASE(Func, nullability, typed_funcref)
+ REF_TYPE_CASE(Extern, nullability, typed_funcref)
+ REF_TYPE_CASE(Eq, nullability, gc)
+ REF_TYPE_CASE(Exn, nullability, eh)
+ default:
+ uint32_t type_index =
+ decoder->read_u32v<validate>(pc + 1, length, "type index");
+ *length += 1;
+ if (!enabled.has_gc()) {
+ decoder->error(
+ pc,
+ "invalid value type '(ref [null] (type $t))', enable with "
+ "--experimental-wasm-typed-gc");
+ return kWasmBottom;
+ }
+
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ decoder->errorf(pc + 1,
+ "Type index %u is greater than the maximum "
+ "number %zu of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ return ValueType::Ref(static_cast<HeapType>(type_index), nullability);
+ }
+ decoder->errorf(
+ pc,
+ "invalid value type '(ref%s $t)', enable with --experimental-wasm-gc",
+ nullability ? " null" : "");
+ return kWasmBottom;
+ }
+#undef REF_TYPE_CASE
+ case kLocalRtt:
+ if (enabled.has_gc()) {
+ uint32_t depth_length;
+ uint32_t depth =
+ decoder->read_u32v<validate>(pc + 1, &depth_length, "depth");
+ // TODO(7748): Introduce a proper limit.
+ const uint32_t kMaxRttSubtypingDepth = 7;
+ if (!VALIDATE(depth <= kMaxRttSubtypingDepth)) {
+ decoder->errorf(pc,
+ "subtyping depth %u is greater than the maximum "
+ "depth %u supported by V8",
+ depth, kMaxRttSubtypingDepth);
+ return kWasmBottom;
+ }
+ uint32_t type_index = decoder->read_u32v<validate>(
+ pc + 1 + depth_length, length, "type index");
+ if (!VALIDATE(type_index < kV8MaxWasmTypes)) {
+ decoder->errorf(pc,
+ "Type index %u is greater than the maximum "
+ "number %zu of type definitions supported by V8",
+ type_index, kV8MaxWasmTypes);
+ return kWasmBottom;
+ }
+ *length += 1 + depth_length;
+ return ValueType::Rtt(static_cast<HeapType>(type_index),
+ static_cast<uint8_t>(depth));
+ }
+ decoder->error(
+ pc, "invalid value type 'rtt', enable with --experimental-wasm-gc");
+ return kWasmBottom;
+ case kLocalS128:
+ if (enabled.has_simd()) {
+ return kWasmS128;
+ }
+ decoder->error(
+ pc,
+ "invalid value type 'Simd128', enable with --experimental-wasm-simd");
+ return kWasmBottom;
+ case kLocalVoid:
+ case kLocalI8:
+ case kLocalI16:
+ // Although these types are included in ValueType, they are technically
+ // not value types and are only used in specific contexts. The caller of
+ // this function is responsible for checking for them separately.
+ break;
+ }
+ // Malformed modules specifying invalid types can get here.
+ return kWasmBottom;
+}
+} // namespace value_type_reader
+
// Helpers for decoding different kinds of immediates which follow bytecodes.
template <Decoder::ValidateFlag validate>
struct LocalIndexImmediate {
@@ -174,7 +307,9 @@ struct ImmF32Immediate {
float value;
uint32_t length = 4;
inline ImmF32Immediate(Decoder* decoder, const byte* pc) {
- // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ // We can't use bit_cast here because calling any helper function that
+ // returns a float would potentially flip NaN bits per C++ semantics, so we
+ // have to inline the memcpy call directly.
uint32_t tmp = decoder->read_u32<validate>(pc + 1, "immf32");
memcpy(&value, &tmp, sizeof(value));
}
@@ -192,6 +327,17 @@ struct ImmF64Immediate {
};
template <Decoder::ValidateFlag validate>
+struct RefNullImmediate {
+ ValueType type;
+ uint32_t length = 1;
+ inline RefNullImmediate(const WasmFeatures& enabled, Decoder* decoder,
+ const byte* pc) {
+ type = value_type_reader::read_value_type<validate>(decoder, pc + 1,
+ &length, enabled);
+ }
+};
+
+template <Decoder::ValidateFlag validate>
struct GlobalIndexImmediate {
uint32_t index;
ValueType type = kWasmStmt;
@@ -203,135 +349,6 @@ struct GlobalIndexImmediate {
}
};
-namespace value_type_reader {
-
-// Read a value type starting at address 'pc' in 'decoder'.
-// No bytes are consumed. The result is written into the 'result' parameter.
-// Returns the amount of bytes read, or 0 if decoding failed.
-// Registers an error if the type opcode is invalid iff validate is set.
-template <Decoder::ValidateFlag validate>
-uint32_t read_value_type(Decoder* decoder, const byte* pc, ValueType* result,
- const WasmFeatures& enabled) {
- byte val = decoder->read_u8<validate>(pc, "value type opcode");
- if (decoder->failed()) return 0;
-
- ValueTypeCode code = static_cast<ValueTypeCode>(val);
- switch (code) {
- case kLocalI32:
- *result = kWasmI32;
- return 1;
- case kLocalI64:
- *result = kWasmI64;
- return 1;
- case kLocalF32:
- *result = kWasmF32;
- return 1;
- case kLocalF64:
- *result = kWasmF64;
- return 1;
- case kLocalAnyRef:
- if (enabled.has_anyref()) {
- *result = kWasmAnyRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'anyref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalFuncRef:
- if (enabled.has_anyref()) {
- *result = kWasmFuncRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'funcref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalNullRef:
- if (enabled.has_anyref()) {
- *result = kWasmNullRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'nullref', enable with "
- "--experimental-wasm-anyref");
- return 0;
- case kLocalExnRef:
- if (enabled.has_eh()) {
- *result = kWasmExnRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'exception ref', enable with "
- "--experimental-wasm-eh");
- return 0;
- case kLocalRef:
- if (enabled.has_gc()) {
- uint32_t length;
- uint32_t type_index =
- decoder->read_u32v<validate>(pc + 1, &length, "type index");
- *result = ValueType(ValueType::kRef, type_index);
- return length + 1;
- }
- decoder->error(pc,
- "invalid value type 'ref', enable with "
- "--experimental-wasm-gc");
- return 0;
- case kLocalOptRef:
- if (enabled.has_gc()) {
- uint32_t length;
- uint32_t type_index =
- decoder->read_u32v<validate>(pc + 1, &length, "type index");
- *result = ValueType(ValueType::kOptRef, type_index);
- return length + 1;
- }
- decoder->error(pc,
- "invalid value type 'optref', enable with "
- "--experimental-wasm-gc");
- return 0;
- case kLocalEqRef:
- if (enabled.has_gc()) {
- *result = kWasmEqRef;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'eqref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalI31Ref:
- if (enabled.has_gc()) {
- // TODO(7748): Implement
- decoder->error(pc, "'i31ref' is unimplemented");
- }
- decoder->error(pc,
- "invalid value type 'i31ref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalRttRef:
- if (enabled.has_gc()) {
- // TODO(7748): Implement
- decoder->error(pc, "'rttref' is unimplemented");
- }
- decoder->error(pc,
- "invalid value type 'rttref', enable with "
- "--experimental-wasm-simd");
- return 0;
- case kLocalS128:
- if (enabled.has_simd()) {
- *result = kWasmS128;
- return 1;
- }
- decoder->error(pc,
- "invalid value type 'Simd128', enable with "
- "--experimental-wasm-simd");
- return 0;
- default:
- *result = kWasmBottom;
- return 0;
- }
-}
-} // namespace value_type_reader
-
template <Decoder::ValidateFlag validate>
struct SelectTypeImmediate {
uint32_t length;
@@ -346,10 +363,11 @@ struct SelectTypeImmediate {
pc + 1, "Invalid number of types. Select accepts exactly one type");
return;
}
- uint32_t type_length = value_type_reader::read_value_type<validate>(
- decoder, pc + length + 1, &type, enabled);
+ uint32_t type_length;
+ type = value_type_reader::read_value_type<validate>(
+ decoder, pc + length + 1, &type_length, enabled);
length += type_length;
- if (type_length == 0) {
+ if (type == kWasmBottom) {
decoder->error(pc + 1, "invalid select type");
}
}
@@ -368,9 +386,9 @@ struct BlockTypeImmediate {
// 1st case: void block. Struct fields stay at default values.
return;
}
- length = value_type_reader::read_value_type<validate>(decoder, pc + 1,
- &type, enabled);
- if (length > 0) {
+ type = value_type_reader::read_value_type<validate>(decoder, pc + 1,
+ &length, enabled);
+ if (type != kWasmBottom) {
// 2nd case: block with val type immediate.
return;
}
@@ -497,6 +515,17 @@ struct ArrayIndexImmediate {
}
};
+// TODO(jkummerow): Make this a superclass of StructIndexImmediate and
+// ArrayIndexImmediate? Maybe even FunctionIndexImmediate too?
+template <Decoder::ValidateFlag validate>
+struct TypeIndexImmediate {
+ uint32_t index = 0;
+ uint32_t length = 0;
+ inline TypeIndexImmediate(Decoder* decoder, const byte* pc) {
+ index = decoder->read_u32v<validate>(pc, &length, "type index");
+ }
+};
+
template <Decoder::ValidateFlag validate>
struct CallIndirectImmediate {
uint32_t table_index;
@@ -509,7 +538,7 @@ struct CallIndirectImmediate {
sig_index = decoder->read_u32v<validate>(pc + 1, &len, "signature index");
TableIndexImmediate<validate> table(decoder, pc + len);
if (!VALIDATE((table.index == 0 && table.length == 1) ||
- enabled.has_anyref())) {
+ enabled.has_reftypes())) {
decoder->errorf(pc + 1 + len, "expected table index 0, found %u",
table.index);
}
@@ -733,7 +762,7 @@ struct Merge {
// Reachability::kReachable.
bool reached;
- Merge(bool reached = false) : reached(reached) {}
+ explicit Merge(bool reached = false) : reached(reached) {}
Value& operator[](uint32_t i) {
DCHECK_GT(arity, i);
@@ -746,6 +775,7 @@ enum ControlKind : uint8_t {
kControlIfElse,
kControlBlock,
kControlLoop,
+ kControlLet,
kControlTry,
kControlTryCatch
};
@@ -763,6 +793,7 @@ enum Reachability : uint8_t {
template <typename Value>
struct ControlBase {
ControlKind kind = kControlBlock;
+ uint32_t locals_count = 0;
uint32_t stack_depth = 0; // stack height at the beginning of the construct.
const uint8_t* pc = nullptr;
Reachability reachability = kReachable;
@@ -773,13 +804,16 @@ struct ControlBase {
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(ControlBase);
- ControlBase(ControlKind kind, uint32_t stack_depth, const uint8_t* pc,
- Reachability reachability)
+ ControlBase(ControlKind kind, uint32_t locals_count, uint32_t stack_depth,
+ const uint8_t* pc, Reachability reachability)
: kind(kind),
+ locals_count(locals_count),
stack_depth(stack_depth),
pc(pc),
reachability(reachability),
- start_merge(reachability == kReachable) {}
+ start_merge(reachability == kReachable) {
+ DCHECK(kind == kControlLet || locals_count == 0);
+ }
// Check whether the current block is reachable.
bool reachable() const { return reachability == kReachable; }
@@ -799,6 +833,7 @@ struct ControlBase {
bool is_onearmed_if() const { return kind == kControlIf; }
bool is_if_else() const { return kind == kControlIfElse; }
bool is_block() const { return kind == kControlBlock; }
+ bool is_let() const { return kind == kControlLet; }
bool is_loop() const { return kind == kControlLoop; }
bool is_incomplete_try() const { return kind == kControlTry; }
bool is_try_catch() const { return kind == kControlTryCatch; }
@@ -809,122 +844,120 @@ struct ControlBase {
}
};
-enum class LoadTransformationKind : uint8_t {
- kSplat,
- kExtend,
-};
-
// This is the list of callback functions that an interface for the
// WasmFullDecoder should implement.
// F(Name, args...)
-#define INTERFACE_FUNCTIONS(F) \
- /* General: */ \
- F(StartFunction) \
- F(StartFunctionBody, Control* block) \
- F(FinishFunction) \
- F(OnFirstError) \
- F(NextInstruction, WasmOpcode) \
- /* Control: */ \
- F(Block, Control* block) \
- F(Loop, Control* block) \
- F(Try, Control* block) \
- F(Catch, Control* block, Value* exception) \
- F(If, const Value& cond, Control* if_block) \
- F(FallThruTo, Control* c) \
- F(PopControl, Control* block) \
- F(EndControl, Control* block) \
- /* Instructions: */ \
- F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
- F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
- Value* result) \
- F(I32Const, Value* result, int32_t value) \
- F(I64Const, Value* result, int64_t value) \
- F(F32Const, Value* result, float value) \
- F(F64Const, Value* result, double value) \
- F(RefNull, Value* result) \
- F(RefFunc, uint32_t function_index, Value* result) \
- F(RefAsNonNull, const Value& arg, Value* result) \
- F(Drop, const Value& value) \
- F(DoReturn, Vector<Value> values) \
- F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
- F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
- F(LocalTee, const Value& value, Value* result, \
- const LocalIndexImmediate<validate>& imm) \
- F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
- F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
- F(TableGet, const Value& index, Value* result, \
- const TableIndexImmediate<validate>& imm) \
- F(TableSet, const Value& index, const Value& value, \
- const TableIndexImmediate<validate>& imm) \
- F(Unreachable) \
- F(Select, const Value& cond, const Value& fval, const Value& tval, \
- Value* result) \
- F(Br, Control* target) \
- F(BrIf, const Value& cond, uint32_t depth) \
- F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
- F(Else, Control* if_block) \
- F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, Value* result) \
- F(LoadTransform, LoadType type, LoadTransformationKind transform, \
- MemoryAccessImmediate<validate>& imm, const Value& index, Value* result) \
- F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
- const Value& index, const Value& value) \
- F(CurrentMemoryPages, Value* result) \
- F(MemoryGrow, const Value& value, Value* result) \
- F(CallDirect, const CallFunctionImmediate<validate>& imm, \
- const Value args[], Value returns[]) \
- F(CallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[], \
- Value returns[]) \
- F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
- const Value args[]) \
- F(ReturnCallIndirect, const Value& index, \
- const CallIndirectImmediate<validate>& imm, const Value args[]) \
- F(BrOnNull, const Value& ref_object, uint32_t depth) \
- F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
- F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
- const Vector<Value> inputs, Value* result) \
- F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
- const Value& input0, const Value& input1, Value* result) \
- F(Throw, const ExceptionIndexImmediate<validate>& imm, \
- const Vector<Value>& args) \
- F(Rethrow, const Value& exception) \
- F(BrOnException, const Value& exception, \
- const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
- Vector<Value> values) \
- F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
- const MemoryAccessImmediate<validate>& imm, Value* result) \
- F(AtomicFence) \
- F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(DataDrop, const DataDropImmediate<validate>& imm) \
- F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
- const Value& src, const Value& size) \
- F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
- const Value& value, const Value& size) \
- F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
- F(ElemDrop, const ElemDropImmediate<validate>& imm) \
- F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
- F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
- const Value& delta, Value* result) \
- F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
- F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
- const Value& value, const Value& count) \
- F(StructNew, const StructIndexImmediate<validate>& imm, const Value args[], \
- Value* result) \
- F(StructGet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, Value* result) \
- F(StructSet, const Value& struct_object, \
- const FieldIndexImmediate<validate>& field, const Value& field_value) \
- F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
- const Value& initial_value, Value* result) \
- F(ArrayGet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- Value* result) \
- F(ArraySet, const Value& array_obj, \
- const ArrayIndexImmediate<validate>& imm, const Value& index, \
- const Value& value) \
- F(ArrayLen, const Value& array_obj, Value* result) \
+#define INTERFACE_FUNCTIONS(F) \
+ /* General: */ \
+ F(StartFunction) \
+ F(StartFunctionBody, Control* block) \
+ F(FinishFunction) \
+ F(OnFirstError) \
+ F(NextInstruction, WasmOpcode) \
+ /* Control: */ \
+ F(Block, Control* block) \
+ F(Loop, Control* block) \
+ F(Try, Control* block) \
+ F(Catch, Control* block, Value* exception) \
+ F(If, const Value& cond, Control* if_block) \
+ F(FallThruTo, Control* c) \
+ F(PopControl, Control* block) \
+ F(EndControl, Control* block) \
+ /* Instructions: */ \
+ F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
+ F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
+ Value* result) \
+ F(I32Const, Value* result, int32_t value) \
+ F(I64Const, Value* result, int64_t value) \
+ F(F32Const, Value* result, float value) \
+ F(F64Const, Value* result, double value) \
+ F(RefNull, Value* result) \
+ F(RefFunc, uint32_t function_index, Value* result) \
+ F(RefAsNonNull, const Value& arg, Value* result) \
+ F(Drop, const Value& value) \
+ F(DoReturn, Vector<Value> values) \
+ F(LocalGet, Value* result, const LocalIndexImmediate<validate>& imm) \
+ F(LocalSet, const Value& value, const LocalIndexImmediate<validate>& imm) \
+ F(LocalTee, const Value& value, Value* result, \
+ const LocalIndexImmediate<validate>& imm) \
+ F(AllocateLocals, Vector<Value> local_values) \
+ F(DeallocateLocals, uint32_t count) \
+ F(GlobalGet, Value* result, const GlobalIndexImmediate<validate>& imm) \
+ F(GlobalSet, const Value& value, const GlobalIndexImmediate<validate>& imm) \
+ F(TableGet, const Value& index, Value* result, \
+ const TableIndexImmediate<validate>& imm) \
+ F(TableSet, const Value& index, const Value& value, \
+ const TableIndexImmediate<validate>& imm) \
+ F(Unreachable) \
+ F(Select, const Value& cond, const Value& fval, const Value& tval, \
+ Value* result) \
+ F(Br, Control* target) \
+ F(BrIf, const Value& cond, uint32_t depth) \
+ F(BrTable, const BranchTableImmediate<validate>& imm, const Value& key) \
+ F(Else, Control* if_block) \
+ F(LoadMem, LoadType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, Value* result) \
+ F(LoadTransform, LoadType type, LoadTransformationKind transform, \
+ MemoryAccessImmediate<validate>& imm, const Value& index, Value* result) \
+ F(StoreMem, StoreType type, const MemoryAccessImmediate<validate>& imm, \
+ const Value& index, const Value& value) \
+ F(CurrentMemoryPages, Value* result) \
+ F(MemoryGrow, const Value& value, Value* result) \
+ F(CallDirect, const CallFunctionImmediate<validate>& imm, \
+ const Value args[], Value returns[]) \
+ F(CallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[], \
+ Value returns[]) \
+ F(ReturnCall, const CallFunctionImmediate<validate>& imm, \
+ const Value args[]) \
+ F(ReturnCallIndirect, const Value& index, \
+ const CallIndirectImmediate<validate>& imm, const Value args[]) \
+ F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
+ F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
+ const Vector<Value> inputs, Value* result) \
+ F(Simd8x16ShuffleOp, const Simd8x16ShuffleImmediate<validate>& imm, \
+ const Value& input0, const Value& input1, Value* result) \
+ F(Throw, const ExceptionIndexImmediate<validate>& imm, \
+ const Vector<Value>& args) \
+ F(Rethrow, const Value& exception) \
+ F(BrOnException, const Value& exception, \
+ const ExceptionIndexImmediate<validate>& imm, uint32_t depth, \
+ Vector<Value> values) \
+ F(AtomicOp, WasmOpcode opcode, Vector<Value> args, \
+ const MemoryAccessImmediate<validate>& imm, Value* result) \
+ F(AtomicFence) \
+ F(MemoryInit, const MemoryInitImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(DataDrop, const DataDropImmediate<validate>& imm) \
+ F(MemoryCopy, const MemoryCopyImmediate<validate>& imm, const Value& dst, \
+ const Value& src, const Value& size) \
+ F(MemoryFill, const MemoryIndexImmediate<validate>& imm, const Value& dst, \
+ const Value& value, const Value& size) \
+ F(TableInit, const TableInitImmediate<validate>& imm, Vector<Value> args) \
+ F(ElemDrop, const ElemDropImmediate<validate>& imm) \
+ F(TableCopy, const TableCopyImmediate<validate>& imm, Vector<Value> args) \
+ F(TableGrow, const TableIndexImmediate<validate>& imm, const Value& value, \
+ const Value& delta, Value* result) \
+ F(TableSize, const TableIndexImmediate<validate>& imm, Value* result) \
+ F(TableFill, const TableIndexImmediate<validate>& imm, const Value& start, \
+ const Value& value, const Value& count) \
+ F(StructNew, const StructIndexImmediate<validate>& imm, const Value args[], \
+ Value* result) \
+ F(StructGet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, bool is_signed, Value* result) \
+ F(StructSet, const Value& struct_object, \
+ const FieldIndexImmediate<validate>& field, const Value& field_value) \
+ F(ArrayNew, const ArrayIndexImmediate<validate>& imm, const Value& length, \
+ const Value& initial_value, Value* result) \
+ F(ArrayGet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ bool is_signed, Value* result) \
+ F(ArraySet, const Value& array_obj, \
+ const ArrayIndexImmediate<validate>& imm, const Value& index, \
+ const Value& value) \
+ F(ArrayLen, const Value& array_obj, Value* result) \
+ F(RttCanon, const TypeIndexImmediate<validate>& imm, Value* result) \
F(PassThrough, const Value& from, Value* to)
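// A minimal sketch of a decoder interface, assuming the hypothetical names
// CountingInterface, FullDecoder, and block_count_: each F(Name, args...)
// entry above maps to a member function that additionally receives the
// decoder as its first argument (see the CALL_INTERFACE macro below). Only a
// few callbacks are shown; a real interface implements every entry of
// INTERFACE_FUNCTIONS.
//
//   class CountingInterface {
//    public:
//     using Value = ValueBase;             // interface-specific value type
//     using Control = ControlBase<Value>;  // interface-specific control type
//     void StartFunction(FullDecoder* decoder) {}
//     void Block(FullDecoder* decoder, Control* block) { ++block_count_; }
//     void I32Const(FullDecoder* decoder, Value* result, int32_t value) {}
//     // ...remaining INTERFACE_FUNCTIONS callbacks...
//    private:
//     int block_count_ = 0;
//   };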
// Generic Wasm bytecode decoder with utilities for decoding immediates,
@@ -954,44 +987,81 @@ class WasmDecoder : public Decoder {
: static_cast<uint32_t>(local_types_->size());
}
- static bool DecodeLocals(const WasmFeatures& enabled, Decoder* decoder,
- const FunctionSig* sig,
- ZoneVector<ValueType>* type_list) {
- DCHECK_NOT_NULL(type_list);
- DCHECK_EQ(0, type_list->size());
- // Initialize from signature.
- if (sig != nullptr) {
- type_list->assign(sig->parameters().begin(), sig->parameters().end());
+ void InitializeLocalsFromSig() {
+ if (sig_ != nullptr) {
+ local_types_->assign(sig_->parameters().begin(),
+ sig_->parameters().end());
}
+ }
+
+ // Decodes local definitions in the current decoder.
+ // Returns true iff locals are found.
+ // Writes the total length of the decoded locals into 'total_length'.
+ // If insert_position is present, the decoded locals will be inserted into the
+ // 'local_types_' of this decoder. Otherwise, this function is used just to
+ // check validity and determine the encoding length of the locals in bytes.
+ // The decoder's pc is not advanced. If no locals are found (i.e., no
+ // LEB128-encoded uint32 can be read at pc), this returns 'false' without
+ // registering an error.
+ bool DecodeLocals(const byte* pc, uint32_t* total_length,
+ const base::Optional<uint32_t> insert_position) {
+ DCHECK_NOT_NULL(local_types_);
+
+ uint32_t length;
+ *total_length = 0;
+
+ // The 'else' value is never used; we pass it only for convenience.
+ ZoneVector<ValueType>::iterator insert_iterator =
+ insert_position.has_value()
+ ? local_types_->begin() + insert_position.value()
+ : local_types_->begin();
+
// Decode local declarations, if any.
- uint32_t entries = decoder->consume_u32v("local decls count");
- if (decoder->failed()) return false;
+ uint32_t entries = read_u32v<kValidate>(pc, &length, "local decls count");
+ if (failed()) {
+ error(pc + *total_length, "invalid local decls count");
+ return false;
+ }
+ *total_length += length;
TRACE("local decls count: %u\n", entries);
- while (entries-- > 0 && decoder->more()) {
- uint32_t count = decoder->consume_u32v("local count");
- if (decoder->failed()) return false;
- DCHECK_LE(type_list->size(), kV8MaxWasmFunctionLocals);
- if (count > kV8MaxWasmFunctionLocals - type_list->size()) {
- decoder->error(decoder->pc() - 1, "local count too large");
+ while (entries-- > 0) {
+ if (!more()) {
+ error(end(), "expected more local decls but reached end of input");
+ return false;
+ }
+ uint32_t count =
+ read_u32v<kValidate>(pc + *total_length, &length, "local count");
+ if (failed()) {
+ error(pc + *total_length, "invalid local count");
return false;
}
- ValueType type;
- uint32_t type_length = value_type_reader::read_value_type<validate>(
- decoder, decoder->pc(), &type, enabled);
- if (type_length == 0) {
- decoder->error(decoder->pc(), "invalid local type");
+ DCHECK_LE(local_types_->size(), kV8MaxWasmFunctionLocals);
+ if (count > kV8MaxWasmFunctionLocals - local_types_->size()) {
+ error(pc + *total_length, "local count too large");
return false;
}
- type_list->insert(type_list->end(), count, type);
- decoder->consume_bytes(type_length);
+ *total_length += length;
+
+ ValueType type = value_type_reader::read_value_type<kValidate>(
+ this, pc + *total_length, &length, enabled_);
+ if (type == kWasmBottom) {
+ error(pc + *total_length, "invalid local type");
+ return false;
+ }
+ *total_length += length;
+ if (insert_position.has_value()) {
+ // Move the insertion iterator to the end of the newly inserted locals.
+ insert_iterator =
+ local_types_->insert(insert_iterator, count, type) + count;
+ }
}
- DCHECK(decoder->ok());
+ DCHECK(ok());
return true;
}
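// A worked example of the locals encoding consumed by DecodeLocals above,
// using a hypothetical byte buffer: the format is a LEB128 entry count
// followed by (LEB128 repetition count, value type) pairs.
//
//   const byte locals[] = {
//       0x02,        // 2 entries
//       0x02, 0x7f,  // 2 locals of type i32
//       0x01, 0x7c,  // 1 local of type f64
//   };
//
// DecodeLocals(locals, &total_length, 0) would insert {i32, i32, f64} at the
// front of 'local_types_' and set total_length to 5.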
- static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
+ static BitVector* AnalyzeLoopAssignment(WasmDecoder* decoder, const byte* pc,
uint32_t locals_count, Zone* zone) {
if (pc >= decoder->end()) return nullptr;
if (*pc != kExprLoop) return nullptr;
@@ -1055,11 +1125,17 @@ class WasmDecoder : public Decoder {
return true;
}
- inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->exceptions.size())) {
+ inline bool Validate(const byte* pc, RefNullImmediate<validate>& imm) {
+ if (!VALIDATE(imm.type.is_nullable())) {
+ errorf(pc + 1, "ref.null does not exist for %s",
+ imm.type.type_name().c_str());
return false;
}
+ return true;
+ }
+
+ inline bool Complete(const byte* pc, ExceptionIndexImmediate<validate>& imm) {
+ if (!VALIDATE(imm.index < module_->exceptions.size())) return false;
imm.exception = &module_->exceptions[imm.index];
return true;
}
@@ -1073,7 +1149,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, GlobalIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && imm.index < module_->globals.size())) {
+ if (!VALIDATE(imm.index < module_->globals.size())) {
errorf(pc + 1, "invalid global index: %u", imm.index);
return false;
}
@@ -1083,9 +1159,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, StructIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_struct(imm.index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_struct(imm.index))) return false;
imm.struct_type = module_->struct_type(imm.index);
return true;
}
@@ -1104,9 +1178,7 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, ArrayIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_array(imm.index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_array(imm.index))) return false;
imm.array_type = module_->array_type(imm.index);
return true;
}
@@ -1117,6 +1189,15 @@ class WasmDecoder : public Decoder {
return false;
}
+ inline bool Validate(const byte* pc, TypeIndexImmediate<validate>& imm) {
+ if (!VALIDATE(module_ != nullptr && (module_->has_struct(imm.index) ||
+ module_->has_array(imm.index)))) {
+ errorf(pc, "invalid type index: %u", imm.index);
+ return false;
+ }
+ return true;
+ }
+
inline bool CanReturnCall(const FunctionSig* target_sig) {
if (target_sig == nullptr) return false;
size_t num_returns = sig_->return_count();
@@ -1128,11 +1209,11 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallFunctionImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->functions.size())) {
- return false;
- }
+ if (!VALIDATE(imm.index < module_->functions.size())) return false;
imm.sig = module_->functions[imm.index].sig;
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
@@ -1145,22 +1226,20 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- module_->has_signature(imm.sig_index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.table_index < module_->tables.size())) {
+ if (!VALIDATE(imm.table_index < module_->tables.size())) {
error("function table has to exist to execute call_indirect");
return false;
}
- if (!VALIDATE(module_ != nullptr &&
- module_->tables[imm.table_index].type == kWasmFuncRef)) {
+ if (!VALIDATE(module_->tables[imm.table_index].type == kWasmFuncRef)) {
error("table of call_indirect must be of type funcref");
return false;
}
@@ -1235,7 +1314,7 @@ class WasmDecoder : public Decoder {
max_lane = std::max(max_lane, imm.shuffle[i]);
}
// Shuffle indices must be in [0..31] for a 16 lane shuffle.
- if (!VALIDATE(max_lane <= 2 * kSimd128Size)) {
+ if (!VALIDATE(max_lane < 2 * kSimd128Size)) {
error(pc_ + 2, "invalid shuffle mask");
return false;
}
@@ -1244,24 +1323,24 @@ class WasmDecoder : public Decoder {
inline bool Complete(BlockTypeImmediate<validate>& imm) {
if (imm.type != kWasmBottom) return true;
- if (!VALIDATE(module_ && module_->has_signature(imm.sig_index))) {
- return false;
- }
+ if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
+ if (imm.sig->return_count() > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
return true;
}
inline bool Validate(BlockTypeImmediate<validate>& imm) {
if (!Complete(imm)) {
errorf(pc_, "block type index %u out of bounds (%zu types)",
- imm.sig_index, module_ ? module_->types.size() : 0);
+ imm.sig_index, module_->types.size());
return false;
}
return true;
}
inline bool Validate(const byte* pc, FunctionIndexImmediate<validate>& imm) {
- if (!module_) return true;
if (!VALIDATE(imm.index < module_->functions.size())) {
errorf(pc, "invalid function index: %u", imm.index);
return false;
@@ -1274,7 +1353,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, MemoryIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && module_->has_memory)) {
+ if (!VALIDATE(module_->has_memory)) {
errorf(pc + 1, "memory instruction with no memory");
return false;
}
@@ -1282,9 +1361,8 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(MemoryInitImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.data_segment_index <
- module_->num_declared_data_segments)) {
+ if (!VALIDATE(imm.data_segment_index <
+ module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.data_segment_index);
return false;
}
@@ -1294,8 +1372,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(DataDropImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->num_declared_data_segments)) {
+ if (!VALIDATE(imm.index < module_->num_declared_data_segments)) {
errorf(pc_ + 2, "invalid data segment index: %u", imm.index);
return false;
}
@@ -1309,7 +1386,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, TableIndexImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr && imm.index < module_->tables.size())) {
+ if (!VALIDATE(imm.index < module_->tables.size())) {
errorf(pc, "invalid table index: %u", imm.index);
return false;
}
@@ -1317,8 +1394,7 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(TableInitImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.elem_segment_index < module_->elem_segments.size())) {
+ if (!VALIDATE(imm.elem_segment_index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u",
imm.elem_segment_index);
return false;
@@ -1327,18 +1403,17 @@ class WasmDecoder : public Decoder {
return false;
}
ValueType elem_type = module_->elem_segments[imm.elem_segment_index].type;
- if (!VALIDATE(
- elem_type.IsSubTypeOf(module_->tables[imm.table.index].type))) {
+ if (!VALIDATE(IsSubtypeOf(elem_type, module_->tables[imm.table.index].type,
+ module_))) {
errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table.index,
- elem_type.type_name());
+ elem_type.type_name().c_str());
return false;
}
return true;
}
inline bool Validate(ElemDropImmediate<validate>& imm) {
- if (!VALIDATE(module_ != nullptr &&
- imm.index < module_->elem_segments.size())) {
+ if (!VALIDATE(imm.index < module_->elem_segments.size())) {
errorf(pc_ + 2, "invalid element segment index: %u", imm.index);
return false;
}
@@ -1349,16 +1424,16 @@ class WasmDecoder : public Decoder {
if (!Validate(pc_ + 1, imm.table_src)) return false;
if (!Validate(pc_ + 2, imm.table_dst)) return false;
ValueType src_type = module_->tables[imm.table_src.index].type;
- if (!VALIDATE(
- src_type.IsSubTypeOf(module_->tables[imm.table_dst.index].type))) {
+ if (!VALIDATE(IsSubtypeOf(
+ src_type, module_->tables[imm.table_dst.index].type, module_))) {
errorf(pc_ + 2, "table %u is not a super-type of %s", imm.table_dst.index,
- src_type.type_name());
+ src_type.type_name().c_str());
return false;
}
return true;
}
- static uint32_t OpcodeLength(Decoder* decoder, const byte* pc) {
+ static uint32_t OpcodeLength(WasmDecoder* decoder, const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
@@ -1403,6 +1478,15 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
+ case kExprLet: {
+ BlockTypeImmediate<validate> imm(WasmFeatures::All(), decoder, pc);
+ uint32_t locals_length;
+ bool locals_result =
+ decoder->DecodeLocals(decoder->pc() + 1 + imm.length,
+ &locals_length, base::Optional<uint32_t>());
+ return 1 + imm.length + (locals_result ? locals_length : 0);
+ }
+
case kExprThrow: {
ExceptionIndexImmediate<validate> imm(decoder, pc);
return 1 + imm.length;
@@ -1442,6 +1526,10 @@ class WasmDecoder : public Decoder {
return 1 + imm.length;
}
case kExprRefNull: {
+ RefNullImmediate<validate> imm(WasmFeatures::All(), decoder, pc);
+ return 1 + imm.length;
+ }
+ case kExprRefIsNull: {
return 1;
}
case kExprRefFunc: {
@@ -1594,13 +1682,27 @@ class WasmDecoder : public Decoder {
BranchDepthImmediate<validate> imm(decoder, pc + 2);
return 2 + imm.length;
}
- case kExprRttGet:
+ case kExprRttCanon: {
+ // TODO(7748): Introduce "HeapTypeImmediate" and use it here.
+ TypeIndexImmediate<validate> heaptype(decoder, pc + 2);
+ return 2 + heaptype.length;
+ }
case kExprRttSub: {
- // TODO(7748): Impelement.
- UNIMPLEMENTED();
+ // TODO(7748): Implement.
+ decoder->error(pc, "rtt.sub not implemented yet");
+ return 2;
}
+ case kExprI31New:
+ case kExprI31GetS:
+ case kExprI31GetU:
+ case kExprRefTest:
+ case kExprRefCast:
+ return 2;
+
default:
+ // This is unreachable except for malformed modules.
+ decoder->error(pc, "invalid gc opcode");
return 2;
}
}
@@ -1609,7 +1711,8 @@ class WasmDecoder : public Decoder {
}
}
- std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
+ // TODO(clemensb): This is only used by the interpreter; move there.
+ V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
// Handle "simple" opcodes with a fixed signature first.
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -1631,6 +1734,7 @@ class WasmDecoder : public Decoder {
case kExprMemoryGrow:
case kExprRefAsNonNull:
case kExprBrOnNull:
+ case kExprRefIsNull:
return {1, 1};
case kExprLocalSet:
case kExprGlobalSet:
@@ -1682,6 +1786,9 @@ class WasmDecoder : public Decoder {
case kExprReturnCallIndirect:
case kExprUnreachable:
return {0, 0};
+ case kExprLet:
+ // TODO(7748): Implement
+ return {0, 0};
case kNumericPrefix:
case kAtomicPrefix:
case kSimdPrefix: {
@@ -1712,12 +1819,14 @@ class WasmDecoder : public Decoder {
};
#define CALL_INTERFACE(name, ...) interface_.name(this, ##__VA_ARGS__)
-#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
- do { \
- DCHECK(!control_.empty()); \
- if (VALIDATE(this->ok()) && control_.back().reachable()) { \
- interface_.name(this, ##__VA_ARGS__); \
- } \
+#define CALL_INTERFACE_IF_REACHABLE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ DCHECK_EQ(current_code_reachable_, \
+ this->ok() && control_.back().reachable()); \
+ if (current_code_reachable_) { \
+ interface_.name(this, ##__VA_ARGS__); \
+ } \
} while (false)
#define CALL_INTERFACE_IF_PARENT_REACHABLE(name, ...) \
do { \
@@ -1765,8 +1874,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DCHECK_EQ(0, this->local_types_->size());
- WasmDecoder<validate>::DecodeLocals(this->enabled_, this, this->sig_,
- this->local_types_);
+ this->InitializeLocalsFromSig();
+ uint32_t locals_length;
+ this->DecodeLocals(this->pc(), &locals_length,
+ static_cast<uint32_t>(this->local_types_->size()));
+ this->consume_bytes(locals_length);
+
CALL_INTERFACE(StartFunction);
DecodeFunctionBody();
if (!this->failed()) CALL_INTERFACE(FinishFunction);
@@ -1839,6 +1952,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &*(stack_.end() - depth);
}
+ void SetSucceedingCodeDynamicallyUnreachable() {
+ Control* current = &control_.back();
+ if (current->reachable()) {
+ current->reachability = kSpecOnlyReachable;
+ current_code_reachable_ = false;
+ }
+ }
+
private:
Zone* zone_;
@@ -1847,6 +1968,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ZoneVector<ValueType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
+ // Controls whether code should be generated for the current block (basically
+ // a cache for {ok() && control_.back().reachable()}).
+ bool current_code_reachable_ = true;
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmBottom};
@@ -1895,832 +2019,905 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int len_ = 0;
};
- // Decodes the body of a function.
- void DecodeFunctionBody() {
- TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
- this->end(), this->pc_offset(),
- static_cast<int>(this->end() - this->start()));
-
- // Set up initial function block.
- {
- auto* c = PushControl(kControlBlock);
- InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
- InitMerge(&c->end_merge,
- static_cast<uint32_t>(this->sig_->return_count()),
- [&](uint32_t i) {
- return Value{this->pc_, this->sig_->GetReturn(i)};
- });
- CALL_INTERFACE(StartFunctionBody, c);
- }
-
- while (this->pc_ < this->end_) { // decoding loop.
- uint32_t len = 1;
- WasmOpcode opcode = static_cast<WasmOpcode>(*this->pc_);
-
- CALL_INTERFACE_IF_REACHABLE(NextInstruction, opcode);
+ // Helper to avoid calling member methods (which are more expensive to call
+ // indirectly).
+ template <WasmOpcode opcode>
+ static int DecodeOp(WasmFullDecoder* decoder) {
+ return decoder->DecodeOp<opcode>();
+ }
+ template <WasmOpcode opcode>
+ int DecodeOp() {
#if DEBUG
- TraceLine trace_msg;
+ TraceLine trace_msg;
#define TRACE_PART(...) trace_msg.Append(__VA_ARGS__)
- if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- }
+ if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(opcode));
+ }
#else
#define TRACE_PART(...)
#endif
- switch (opcode) {
+ int len = 1;
+
+ // TODO(clemensb): Break this up into individual functions.
+ switch (opcode) {
#define BUILD_SIMPLE_OPCODE(op, _, sig) \
case kExpr##op: \
BuildSimpleOperator_##sig(opcode); \
break;
- FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
+ FOREACH_SIMPLE_OPCODE(BUILD_SIMPLE_OPCODE)
#undef BUILD_SIMPLE_OPCODE
- case kExprNop:
- break;
- case kExprBlock: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* block = PushControl(kControlBlock);
- SetBlockType(block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(Block, block);
- PushMergeValues(block, &block->start_merge);
- len = 1 + imm.length;
- break;
- }
- case kExprRethrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- auto exception = Pop(0, kWasmExnRef);
- CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
- EndControl();
+ case kExprNop:
+ break;
+ case kExprBlock: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* block = PushControl(kControlBlock);
+ SetBlockType(block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(Block, block);
+ PushMergeValues(block, &block->start_merge);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRethrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ Value exception = Pop(0, kWasmExnRef);
+ CALL_INTERFACE_IF_REACHABLE(Rethrow, exception);
+ EndControl();
+ break;
+ }
+ case kExprThrow: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ ExceptionIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ ArgVector args = PopArgs(imm.exception->ToFunctionSig());
+ CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
+ EndControl();
+ break;
+ }
+ case kExprTry: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* try_block = PushControl(kControlTry);
+ SetBlockType(try_block, imm, args.begin());
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Try, try_block);
+ PushMergeValues(try_block, &try_block->start_merge);
+ break;
+ }
+ case kExprCatch: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ if (!VALIDATE(!control_.empty())) {
+ this->error("catch does not match any try");
break;
}
- case kExprThrow: {
- CHECK_PROTOTYPE_OPCODE(eh);
- ExceptionIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.exception->ToFunctionSig());
- CALL_INTERFACE_IF_REACHABLE(Throw, imm, VectorOf(args));
- EndControl();
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_try())) {
+ this->error("catch does not match any try");
break;
}
- case kExprTry: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* try_block = PushControl(kControlTry);
- SetBlockType(try_block, imm, args.begin());
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Try, try_block);
- PushMergeValues(try_block, &try_block->start_merge);
+ if (!VALIDATE(c->is_incomplete_try())) {
+ this->error("catch already present for try");
break;
}
- case kExprCatch: {
- CHECK_PROTOTYPE_OPCODE(eh);
- if (!VALIDATE(!control_.empty())) {
- this->error("catch does not match any try");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_try())) {
- this->error("catch does not match any try");
- break;
- }
- if (!VALIDATE(c->is_incomplete_try())) {
- this->error("catch already present for try");
- break;
+ c->kind = kControlTryCatch;
+ FallThruTo(c);
+ stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
+ c->reachability = control_at(1)->innerReachability();
+ current_code_reachable_ = this->ok() && c->reachable();
+ Value* exception = Push(kWasmExnRef);
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
+ break;
+ }
+ case kExprBrOnExn: {
+ CHECK_PROTOTYPE_OPCODE(eh);
+ BranchOnExceptionImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
+ if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
+ Control* c = control_at(imm.depth.depth);
+ Value exception = Pop(0, kWasmExnRef);
+ const WasmExceptionSig* sig = imm.index.exception->sig;
+ size_t value_count = sig->parameter_count();
+ // TODO(wasm): This operand stack mutation is an ugly hack to make
+ // both type checking here as well as environment merging in the
+ // graph builder interface work out of the box. We should introduce
+ // special handling for both and do minimal/no stack mutation here.
+ for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
+ Vector<Value> values(stack_.data() + c->stack_depth, value_count);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (this->failed()) break;
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
+ values);
+ c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
+ }
+ len = 1 + imm.length;
+ for (size_t i = 0; i < value_count; ++i) Pop();
+ Value* pexception = Push(kWasmExnRef);
+ *pexception = exception;
+ break;
+ }
+ case kExprBrOnNull: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ len = 1 + imm.length;
+ Value ref_object = Pop();
+ if (this->failed()) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ switch (ref_object.type.kind()) {
+ case ValueType::kRef: {
+ Value* result = Push(ref_object.type);
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ break;
+ }
+ case ValueType::kOptRef: {
+ // We need to Push the result value after calling BrOnNull on
+ // the interface. Therefore we must sync the ref_object and
+ // result nodes afterwards (in PassThrough).
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ Value* result = Push(
+ ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
+ CALL_INTERFACE(PassThrough, ref_object, result);
+ c->br_merge()->reached = true;
+ break;
+ }
+ default:
+ this->error(this->pc_,
+ "invalid agrument type to ref.as_non_null");
+ break;
}
- c->kind = kControlTryCatch;
- FallThruTo(c);
- stack_.erase(stack_.begin() + c->stack_depth, stack_.end());
- c->reachability = control_at(1)->innerReachability();
- auto* exception = Push(kWasmExnRef);
- CALL_INTERFACE_IF_PARENT_REACHABLE(Catch, c, exception);
- break;
}
- case kExprBrOnExn: {
- CHECK_PROTOTYPE_OPCODE(eh);
- BranchOnExceptionImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm.depth, control_.size())) break;
- if (!this->Validate(this->pc_ + imm.depth.length, imm.index)) break;
- Control* c = control_at(imm.depth.depth);
- auto exception = Pop(0, kWasmExnRef);
- const WasmExceptionSig* sig = imm.index.exception->sig;
- size_t value_count = sig->parameter_count();
- // TODO(wasm): This operand stack mutation is an ugly hack to make
- // both type checking here as well as environment merging in the
- // graph builder interface work out of the box. We should introduce
- // special handling for both and do minimal/no stack mutation here.
- for (size_t i = 0; i < value_count; ++i) Push(sig->GetParam(i));
- Vector<Value> values(stack_.data() + c->stack_depth, value_count);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(BrOnException, exception, imm.index, imm.depth.depth,
- values);
- c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- break;
- }
- len = 1 + imm.length;
- for (size_t i = 0; i < value_count; ++i) Pop();
- auto* pexception = Push(kWasmExnRef);
- *pexception = exception;
+ break;
+ }
+ case kExprLet: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ uint32_t current_local_count =
+ static_cast<uint32_t>(local_type_vec_.size());
+ // Temporarily add the let-defined values
+ // to the beginning of the function locals.
+ uint32_t locals_length;
+ if (!this->DecodeLocals(this->pc() + 1 + imm.length, &locals_length,
+ 0)) {
+ break;
+ }
+ len = 1 + imm.length + locals_length;
+ uint32_t locals_count =
+ static_cast<uint32_t>(local_type_vec_.size() - current_local_count);
+ ArgVector let_local_values =
+ PopArgs(static_cast<uint32_t>(imm.in_arity()),
+ VectorOf(local_type_vec_.data(), locals_count));
+ ArgVector args = PopArgs(imm.sig);
+ Control* let_block = PushControl(kControlLet, locals_count);
+ SetBlockType(let_block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(Block, let_block);
+ PushMergeValues(let_block, &let_block->start_merge);
+ CALL_INTERFACE_IF_REACHABLE(AllocateLocals, VectorOf(let_local_values));
+ break;
+ }
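// Illustrative sketch of the 'let' form handled above (text-format syntax
// approximated from the typed function references proposal): the popped
// values become block-scoped locals at indices 0..n-1, shifting the existing
// locals up by n until the matching 'end'.
//
//   (func (param i32) (result i32)
//     (i64.const 1)
//     (let (local i64)     ;; pops the i64; it is local 0 inside the block
//       (local.get 1)      ;; the i32 param, shifted from index 0 to 1
//       drop)
//     (local.get 0))       ;; after 'end', the param is local 0 again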
+ case kExprLoop: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Control* block = PushControl(kControlLoop);
+ SetBlockType(&control_.back(), imm, args.begin());
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(Loop, block);
+ PushMergeValues(block, &block->start_merge);
+ break;
+ }
+ case kExprIf: {
+ BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(imm)) break;
+ Value cond = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ if (!VALIDATE(this->ok())) break;
+ Control* if_block = PushControl(kControlIf);
+ SetBlockType(if_block, imm, args.begin());
+ CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
+ len = 1 + imm.length;
+ PushMergeValues(if_block, &if_block->start_merge);
+ break;
+ }
+ case kExprElse: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("else does not match any if");
break;
}
- case kExprBrOnNull: {
- CHECK_PROTOTYPE_OPCODE(gc);
- BranchDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- len = 1 + imm.length;
- Value ref_object = Pop();
- if (this->failed()) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- switch (ref_object.type.kind()) {
- case ValueType::kRef: {
- auto* result = Push(
- ValueType(ValueType::kRef, ref_object.type.ref_index()));
- CALL_INTERFACE(PassThrough, ref_object, result);
- break;
- }
- case ValueType::kOptRef: {
- // We need to Push the result value after calling BrOnNull on
- // the interface. Therefore we must sync the ref_object and
- // result nodes afterwards (in PassThrough).
- CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
- auto* result = Push(
- ValueType(ValueType::kRef, ref_object.type.ref_index()));
- CALL_INTERFACE(PassThrough, ref_object, result);
- c->br_merge()->reached = true;
- break;
- }
- case ValueType::kNullRef:
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- EndControl();
- break;
- default:
- this->error(this->pc_,
- "invalid agrument type to ref.as_non_null");
- break;
- }
- }
+ Control* c = &control_.back();
+ if (!VALIDATE(c->is_if())) {
+ this->error(this->pc_, "else does not match an if");
break;
}
- case kExprLoop: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto args = PopArgs(imm.sig);
- auto* block = PushControl(kControlLoop);
- SetBlockType(&control_.back(), imm, args.begin());
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(Loop, block);
- PushMergeValues(block, &block->start_merge);
+ if (c->is_if_else()) {
+ this->error(this->pc_, "else already present for if");
break;
}
- case kExprIf: {
- BlockTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (!this->Validate(imm)) break;
- auto cond = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- if (!VALIDATE(this->ok())) break;
- auto* if_block = PushControl(kControlIf);
- SetBlockType(if_block, imm, args.begin());
- CALL_INTERFACE_IF_REACHABLE(If, cond, if_block);
- len = 1 + imm.length;
- PushMergeValues(if_block, &if_block->start_merge);
+ if (!TypeCheckFallThru()) break;
+ c->kind = kControlIfElse;
+ CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
+ if (c->reachable()) c->end_merge.reached = true;
+ PushMergeValues(c, &c->start_merge);
+ c->reachability = control_at(1)->innerReachability();
+ current_code_reachable_ = this->ok() && c->reachable();
+ break;
+ }
+ case kExprEnd: {
+ if (!VALIDATE(!control_.empty())) {
+ this->error("end does not match any if, try, or block");
break;
}
- case kExprElse: {
- if (!VALIDATE(!control_.empty())) {
- this->error("else does not match any if");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(c->is_if())) {
- this->error(this->pc_, "else does not match an if");
- break;
- }
- if (c->is_if_else()) {
- this->error(this->pc_, "else already present for if");
- break;
- }
- if (!TypeCheckFallThru()) break;
- c->kind = kControlIfElse;
- CALL_INTERFACE_IF_PARENT_REACHABLE(Else, c);
- if (c->reachable()) c->end_merge.reached = true;
- PushMergeValues(c, &c->start_merge);
- c->reachability = control_at(1)->innerReachability();
+ Control* c = &control_.back();
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->error(this->pc_, "missing catch or catch-all in try");
break;
}
- case kExprEnd: {
- if (!VALIDATE(!control_.empty())) {
- this->error("end does not match any if, try, or block");
- break;
- }
- Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->error(this->pc_, "missing catch or catch-all in try");
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
+ this->error(c->pc,
+ "start-arity and end-arity of one-armed if must match");
break;
}
- if (c->is_onearmed_if()) {
- if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->error(
- c->pc,
- "start-arity and end-arity of one-armed if must match");
- break;
- }
- if (!TypeCheckOneArmedIf(c)) break;
- }
- if (!TypeCheckFallThru()) break;
-
- if (control_.size() == 1) {
- // If at the last (implicit) control, check we are at end.
- if (!VALIDATE(this->pc_ + 1 == this->end_)) {
- this->error(this->pc_ + 1, "trailing code after function end");
- break;
- }
- // The result of the block is the return value.
- TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
- "(implicit) return");
- DoReturn();
- control_.clear();
- break;
- }
- PopControl(c);
- break;
+ if (!TypeCheckOneArmedIf(c)) break;
+ }
+ if (c->is_let()) {
+ this->local_types_->erase(
+ this->local_types_->begin(),
+ this->local_types_->begin() + c->locals_count);
+ CALL_INTERFACE_IF_REACHABLE(DeallocateLocals, c->locals_count);
}
- case kExprSelect: {
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop();
- auto tval = Pop(0, fval.type);
- ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
- if (type.IsSubTypeOf(kWasmAnyRef)) {
- this->error(
- "select without type is only valid for value type inputs");
+ if (!TypeCheckFallThru()) break;
+
+ if (control_.size() == 1) {
+ // If at the last (implicit) control, check we are at end.
+ if (!VALIDATE(this->pc_ + 1 == this->end_)) {
+ this->error(this->pc_ + 1, "trailing code after function end");
break;
}
- auto* result = Push(type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- break;
- }
- case kExprSelectWithType: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
- if (this->failed()) break;
- auto cond = Pop(2, kWasmI32);
- auto fval = Pop(1, imm.type);
- auto tval = Pop(0, imm.type);
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
- len = 1 + imm.length;
+ // The result of the block is the return value.
+ TRACE_PART("\n" TRACE_INST_FORMAT, startrel(this->pc_),
+ "(implicit) return");
+ DoReturn();
+ control_.clear();
break;
}
- case kExprBr: {
- BranchDepthImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- if (imm.depth == control_.size() - 1) {
- DoReturn();
- } else {
- CALL_INTERFACE(Br, c);
- c->br_merge()->reached = true;
- }
- } else if (check_result == kInvalidStack) {
- break;
- }
- len = 1 + imm.length;
- EndControl();
+ PopControl(c);
+ break;
+ }
+ case kExprSelect: {
+ Value cond = Pop(2, kWasmI32);
+ Value fval = Pop();
+ Value tval = Pop(0, fval.type);
+ ValueType type = tval.type == kWasmBottom ? fval.type : tval.type;
+ if (type.is_reference_type()) {
+ this->error(
+ "select without type is only valid for value type inputs");
break;
}
- case kExprBrIf: {
- BranchDepthImmediate<validate> imm(this, this->pc_);
- auto cond = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
- Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE(BrIf, cond, imm.depth);
+ Value* result = Push(type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ break;
+ }
+ case kExprSelectWithType: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ SelectTypeImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (this->failed()) break;
+ Value cond = Pop(2, kWasmI32);
+ Value fval = Pop(1, imm.type);
+ Value tval = Pop(0, imm.type);
+ Value* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(Select, cond, fval, tval, result);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBr: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, false);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ if (imm.depth == control_.size() - 1) {
+ DoReturn();
+ } else {
+ CALL_INTERFACE(Br, c);
c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- break;
}
- len = 1 + imm.length;
+ } else if (check_result == kInvalidStack) {
break;
}
- case kExprBrTable: {
- BranchTableImmediate<validate> imm(this, this->pc_);
- BranchTableIterator<validate> iterator(this, imm);
- auto key = Pop(0, kWasmI32);
- if (this->failed()) break;
- if (!this->Validate(this->pc_, imm, control_.size())) break;
-
- // Cache the branch targets during the iteration, so that we can set
- // all branch targets as reachable after the {CALL_INTERFACE} call.
- std::vector<bool> br_targets(control_.size());
-
- // The result types of the br_table instruction. We have to check the
- // stack against these types. Only needed during validation.
- std::vector<ValueType> result_types;
-
- while (iterator.has_next()) {
- const uint32_t index = iterator.cur_index();
- const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
- // Avoid redundant branch target checks.
- if (br_targets[target]) continue;
- br_targets[target] = true;
-
- if (validate) {
- if (index == 0) {
- // With the first branch target, initialize the result types.
- result_types = InitializeBrTableResultTypes(target);
- } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
- index)) {
- break;
- }
+ len = 1 + imm.length;
+ EndControl();
+ break;
+ }
+ case kExprBrIf: {
+ BranchDepthImmediate<validate> imm(this, this->pc_);
+ Value cond = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+ Control* c = control_at(imm.depth);
+ TypeCheckBranchResult check_result = TypeCheckBranch(c, true);
+ if (V8_LIKELY(check_result == kReachableBranch)) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
+ c->br_merge()->reached = true;
+ } else if (check_result == kInvalidStack) {
+ break;
+ }
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprBrTable: {
+ BranchTableImmediate<validate> imm(this, this->pc_);
+ BranchTableIterator<validate> iterator(this, imm);
+ Value key = Pop(0, kWasmI32);
+ if (this->failed()) break;
+ if (!this->Validate(this->pc_, imm, control_.size())) break;
+
+ // Cache the branch targets during the iteration, so that we can set
+ // all branch targets as reachable after the {CALL_INTERFACE} call.
+ std::vector<bool> br_targets(control_.size());
+
+ // The result types of the br_table instruction. We have to check the
+ // stack against these types. Only needed during validation.
+ std::vector<ValueType> result_types;
+
+ while (iterator.has_next()) {
+ const uint32_t index = iterator.cur_index();
+ const byte* pos = iterator.pc();
+ uint32_t target = iterator.next();
+ if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) break;
+ // Avoid redundant branch target checks.
+ if (br_targets[target]) continue;
+ br_targets[target] = true;
+
+ if (validate) {
+ if (index == 0) {
+ // With the first branch target, initialize the result types.
+ result_types = InitializeBrTableResultTypes(target);
+ } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
+ index)) {
+ break;
}
}
+ }
- if (!VALIDATE(TypeCheckBrTable(result_types))) break;
+ if (!VALIDATE(TypeCheckBrTable(result_types))) break;
- DCHECK(this->ok());
+ DCHECK(this->ok());
- if (control_.back().reachable()) {
- CALL_INTERFACE(BrTable, imm, key);
+ if (current_code_reachable_) {
+ CALL_INTERFACE(BrTable, imm, key);
- for (int i = 0, e = control_depth(); i < e; ++i) {
- if (!br_targets[i]) continue;
- control_at(i)->br_merge()->reached = true;
- }
+ for (int i = 0, e = control_depth(); i < e; ++i) {
+ if (!br_targets[i]) continue;
+ control_at(i)->br_merge()->reached = true;
}
-
- len = 1 + iterator.length();
- EndControl();
- break;
}
- case kExprReturn: {
- if (V8_LIKELY(control_.back().reachable())) {
- if (!VALIDATE(TypeCheckReturn())) break;
- DoReturn();
- } else {
- // We pop all return values from the stack to check their type.
- // Since we deal with unreachable code, we do not have to keep the
- // values.
- int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = num_returns - 1; i >= 0; --i) {
- Pop(i, this->sig_->GetReturn(i));
- }
- }
- EndControl();
- break;
- }
- case kExprUnreachable: {
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmI64);
- CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF32);
- CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<validate> imm(this, this->pc_);
- auto* value = Push(kWasmF64);
- CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- auto* value = Push(kWasmNullRef);
- CALL_INTERFACE_IF_REACHABLE(RefNull, value);
- len = 1;
- break;
- }
- case kExprRefFunc: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- FunctionIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(kWasmFuncRef);
- CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
- len = 1 + imm.length;
- break;
- }
- case kExprRefAsNonNull: {
- CHECK_PROTOTYPE_OPCODE(gc);
- auto value = Pop();
- switch (value.type.kind()) {
- case ValueType::kRef: {
- auto* result =
- Push(ValueType(ValueType::kRef, value.type.ref_index()));
- CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
- break;
- }
- case ValueType::kOptRef: {
- auto* result =
- Push(ValueType(ValueType::kRef, value.type.ref_index()));
- CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
- break;
- }
- case ValueType::kNullRef:
- // TODO(7748): Fix this once the standard clears up (see
- // https://github.com/WebAssembly/function-references/issues/21).
- CALL_INTERFACE_IF_REACHABLE(Unreachable);
- EndControl();
- break;
- default:
- this->error(this->pc_ + 1,
- "invalid agrument type to ref.as_non_null");
- break;
+ len = 1 + iterator.length();
+ EndControl();
+ break;
+ }
+ case kExprReturn: {
+ if (V8_LIKELY(current_code_reachable_)) {
+ if (!VALIDATE(TypeCheckReturn())) break;
+ DoReturn();
+ } else {
+ // We pop all return values from the stack to check their type.
+ // Since we deal with unreachable code, we do not have to keep the
+ // values.
+ int num_returns = static_cast<int>(this->sig_->return_count());
+ for (int i = num_returns - 1; i >= 0; --i) {
+ Pop(i, this->sig_->GetReturn(i));
}
- break;
- }
- case kExprLocalGet: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto* value = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
- len = 1 + imm.length;
- break;
}
- case kExprLocalSet: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprLocalTee: {
- LocalIndexImmediate<validate> imm(this, this->pc_);
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(0, local_type_vec_[imm.index]);
- auto* result = Push(value.type);
- CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- auto value = Pop();
- CALL_INTERFACE_IF_REACHABLE(Drop, value);
+
+ EndControl();
+ break;
+ }
+ case kExprUnreachable: {
+ CALL_INTERFACE_IF_REACHABLE(Unreachable);
+ EndControl();
+ break;
+ }
+ case kExprI32Const: {
+ ImmI32Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(I32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprI64Const: {
+ ImmI64Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmI64);
+ CALL_INTERFACE_IF_REACHABLE(I64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF32Const: {
+ ImmF32Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmF32);
+ CALL_INTERFACE_IF_REACHABLE(F32Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprF64Const: {
+ ImmF64Immediate<validate> imm(this, this->pc_);
+ Value* value = Push(kWasmF64);
+ CALL_INTERFACE_IF_REACHABLE(F64Const, value, imm.value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefNull: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ RefNullImmediate<validate> imm(this->enabled_, this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(RefNull, value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefIsNull: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ Value value = Pop();
+ Value* result = Push(kWasmI32);
+ len = 1;
+ if (value.type.is_nullable()) {
+ CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, value, result);
break;
}
- case kExprGlobalGet: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto* result = Push(imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
+ if (value.type.is_reference_type()) {
+ // Due to the check above, we know that the value is not null.
+ CALL_INTERFACE_IF_REACHABLE(I32Const, result, 0);
break;
}
- case kExprGlobalSet: {
- GlobalIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!VALIDATE(imm.global->mutability)) {
- this->errorf(this->pc_, "immutable global #%u cannot be assigned",
- imm.index);
+ this->errorf(this->pc_,
+ "invalid argument type to ref.is_null. Expected "
+ "reference type, got %s",
+ value.type.type_name().c_str());
+ break;
+ }
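// Illustrative consequence of the handling above, assuming a module with a
// declared function $f: applying ref.is_null to a non-nullable reference is
// folded to a constant at decode time, while nullable operands still go
// through the generic UnOp path.
//
//   (func (result i32)
//     (ref.is_null (ref.func $f)))  ;; ref.func yields a non-nullable ref,
//                                   ;; so the decoder emits i32.const 0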
+ case kExprRefFunc: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ FunctionIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(ValueType::Ref(kHeapFunc, kNonNullable));
+ CALL_INTERFACE_IF_REACHABLE(RefFunc, imm.index, value);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprRefAsNonNull: {
+ CHECK_PROTOTYPE_OPCODE(typed_funcref);
+ Value value = Pop();
+ switch (value.type.kind()) {
+ case ValueType::kRef: {
+ Value* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(PassThrough, value, result);
break;
}
- auto value = Pop(0, imm.type);
- CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
- break;
- }
- case kExprTableGet: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- TableIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- DCHECK_NOT_NULL(this->module_);
- auto index = Pop(0, kWasmI32);
- auto* result = Push(this->module_->tables[imm.index].type);
- CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
- break;
- }
- case kExprTableSet: {
- CHECK_PROTOTYPE_OPCODE(anyref);
- TableIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto value = Pop(1, this->module_->tables[imm.index].type);
- auto index = Pop(0, kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
- break;
- }
-
- case kExprI32LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
- break;
- case kExprI32LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
- break;
- case kExprI32LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
- break;
- case kExprI32LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
- break;
- case kExprI32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI32Load);
- break;
- case kExprI64LoadMem8S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
- break;
- case kExprI64LoadMem8U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
- break;
- case kExprI64LoadMem16S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
- break;
- case kExprI64LoadMem16U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
- break;
- case kExprI64LoadMem32S:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
- break;
- case kExprI64LoadMem32U:
- len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
- break;
- case kExprI64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kI64Load);
- break;
- case kExprF32LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF32Load);
- break;
- case kExprF64LoadMem:
- len = 1 + DecodeLoadMem(LoadType::kF64Load);
- break;
- case kExprI32StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI32Store8);
- break;
- case kExprI32StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI32Store16);
- break;
- case kExprI32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI32Store);
- break;
- case kExprI64StoreMem8:
- len = 1 + DecodeStoreMem(StoreType::kI64Store8);
- break;
- case kExprI64StoreMem16:
- len = 1 + DecodeStoreMem(StoreType::kI64Store16);
- break;
- case kExprI64StoreMem32:
- len = 1 + DecodeStoreMem(StoreType::kI64Store32);
- break;
- case kExprI64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kI64Store);
- break;
- case kExprF32StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF32Store);
- break;
- case kExprF64StoreMem:
- len = 1 + DecodeStoreMem(StoreType::kF64Store);
- break;
- case kExprMemoryGrow: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- DCHECK_NOT_NULL(this->module_);
- if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
- this->error("grow_memory is not supported for asmjs modules");
+ case ValueType::kOptRef: {
+ Value* result =
+ Push(ValueType::Ref(value.type.heap_type(), kNonNullable));
+ CALL_INTERFACE_IF_REACHABLE(RefAsNonNull, value, result);
break;
}
- auto value = Pop(0, kWasmI32);
- auto* result = Push(kWasmI32);
- CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
- break;
+ default:
+ this->error(this->pc_ + 1,
+                        "invalid argument type to ref.as_non_null");
+ break;
}
- case kExprMemorySize: {
- if (!CheckHasMemory()) break;
- MemoryIndexImmediate<validate> imm(this, this->pc_);
- auto* result = Push(kWasmI32);
- len = 1 + imm.length;
- CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprLocalGet: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* value = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(LocalGet, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprLocalSet: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(0, local_type_vec_[imm.index]);
+ CALL_INTERFACE_IF_REACHABLE(LocalSet, value, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprLocalTee: {
+ LocalIndexImmediate<validate> imm(this, this->pc_);
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(0, local_type_vec_[imm.index]);
+ Value* result = Push(value.type);
+ CALL_INTERFACE_IF_REACHABLE(LocalTee, value, result, imm);
+ len = 1 + imm.length;
+ break;
+ }
+ case kExprDrop: {
+ Value value = Pop();
+ CALL_INTERFACE_IF_REACHABLE(Drop, value);
+ break;
+ }
+ case kExprGlobalGet: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value* result = Push(imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GlobalGet, result, imm);
+ break;
+ }
+ case kExprGlobalSet: {
+ GlobalIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!VALIDATE(imm.global->mutability)) {
+ this->errorf(this->pc_, "immutable global #%u cannot be assigned",
+ imm.index);
break;
}
- case kExprCallFunction: {
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
+ Value value = Pop(0, imm.type);
+ CALL_INTERFACE_IF_REACHABLE(GlobalSet, value, imm);
+ break;
+ }
+ case kExprTableGet: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(this->module_->tables[imm.index].type);
+ CALL_INTERFACE_IF_REACHABLE(TableGet, index, result, imm);
+ break;
+ }
+ case kExprTableSet: {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ TableIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value value = Pop(1, this->module_->tables[imm.index].type);
+ Value index = Pop(0, kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(TableSet, index, value, imm);
+ break;
+ }
+
+ case kExprI32LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8S);
+ break;
+ case kExprI32LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load8U);
+ break;
+ case kExprI32LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16S);
+ break;
+ case kExprI32LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load16U);
+ break;
+ case kExprI32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI32Load);
+ break;
+ case kExprI64LoadMem8S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8S);
+ break;
+ case kExprI64LoadMem8U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load8U);
+ break;
+ case kExprI64LoadMem16S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16S);
+ break;
+ case kExprI64LoadMem16U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load16U);
+ break;
+ case kExprI64LoadMem32S:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32S);
+ break;
+ case kExprI64LoadMem32U:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load32U);
+ break;
+ case kExprI64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kI64Load);
+ break;
+ case kExprF32LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF32Load);
+ break;
+ case kExprF64LoadMem:
+ len = 1 + DecodeLoadMem(LoadType::kF64Load);
+ break;
+ case kExprI32StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store8);
+ break;
+ case kExprI32StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store16);
+ break;
+ case kExprI32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI32Store);
+ break;
+ case kExprI64StoreMem8:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store8);
+ break;
+ case kExprI64StoreMem16:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store16);
+ break;
+ case kExprI64StoreMem32:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store32);
+ break;
+ case kExprI64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kI64Store);
+ break;
+ case kExprF32StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF32Store);
+ break;
+ case kExprF64StoreMem:
+ len = 1 + DecodeStoreMem(StoreType::kF64Store);
+ break;
+ case kExprMemoryGrow: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!VALIDATE(this->module_->origin == kWasmOrigin)) {
+ this->error("grow_memory is not supported for asmjs modules");
break;
}
- case kExprCallIndirect: {
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- auto index = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- auto* returns = PushReturns(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
- returns);
+ Value value = Pop(0, kWasmI32);
+ Value* result = Push(kWasmI32);
+ CALL_INTERFACE_IF_REACHABLE(MemoryGrow, value, result);
+ break;
+ }
+ case kExprMemorySize: {
+ if (!CheckHasMemory()) break;
+ MemoryIndexImmediate<validate> imm(this, this->pc_);
+ Value* result = Push(kWasmI32);
+ len = 1 + imm.length;
+ CALL_INTERFACE_IF_REACHABLE(CurrentMemoryPages, result);
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ ArgVector args = PopArgs(imm.sig);
+ Value* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallDirect, imm, args.begin(), returns);
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ Value index = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ Value* returns = PushReturns(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(CallIndirect, index, imm, args.begin(),
+ returns);
+ break;
+ }
+ case kExprReturnCall: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+
+ CallFunctionImmediate<validate> imm(this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
break;
}
- case kExprReturnCall: {
- CHECK_PROTOTYPE_OPCODE(return_call);
- CallFunctionImmediate<validate> imm(this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!this->CanReturnCall(imm.sig)) {
- OPCODE_ERROR(opcode, "tail call return types mismatch");
- break;
- }
+ ArgVector args = PopArgs(imm.sig);
- auto args = PopArgs(imm.sig);
-
- CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
- EndControl();
- break;
- }
- case kExprReturnCallIndirect: {
- CHECK_PROTOTYPE_OPCODE(return_call);
- CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
- len = 1 + imm.length;
- if (!this->Validate(this->pc_, imm)) break;
- if (!this->CanReturnCall(imm.sig)) {
- OPCODE_ERROR(opcode, "tail call return types mismatch");
- break;
- }
- auto index = Pop(0, kWasmI32);
- auto args = PopArgs(imm.sig);
- CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
- args.begin());
- EndControl();
- break;
- }
- case kNumericPrefix: {
- ++len;
- byte numeric_index =
- this->template read_u8<validate>(this->pc_ + 1, "numeric index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | numeric_index);
- if (opcode == kExprTableGrow || opcode == kExprTableSize ||
- opcode == kExprTableFill) {
- CHECK_PROTOTYPE_OPCODE(anyref);
- } else if (opcode >= kExprMemoryInit) {
- CHECK_PROTOTYPE_OPCODE(bulk_memory);
- }
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeNumericOpcode(opcode);
- break;
- }
- case kSimdPrefix: {
- CHECK_PROTOTYPE_OPCODE(simd);
- uint32_t length = 0;
- opcode =
- this->template read_prefixed_opcode<validate>(this->pc_, &length);
- if (!VALIDATE(this->ok())) break;
- len += length;
-
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeSimdOpcode(opcode, length);
- break;
- }
- case kAtomicPrefix: {
- CHECK_PROTOTYPE_OPCODE(threads);
- len++;
- byte atomic_index =
- this->template read_u8<validate>(this->pc_ + 1, "atomic index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | atomic_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len += DecodeAtomicOpcode(opcode);
+ CALL_INTERFACE_IF_REACHABLE(ReturnCall, imm, args.begin());
+ EndControl();
+ break;
+ }
+ case kExprReturnCallIndirect: {
+ CHECK_PROTOTYPE_OPCODE(return_call);
+ CallIndirectImmediate<validate> imm(this->enabled_, this, this->pc_);
+ len = 1 + imm.length;
+ if (!this->Validate(this->pc_, imm)) break;
+ if (!this->CanReturnCall(imm.sig)) {
+ OPCODE_ERROR(opcode, "tail call return types mismatch");
break;
}
- case kGCPrefix: {
- CHECK_PROTOTYPE_OPCODE(gc);
- byte gc_index =
- this->template read_u8<validate>(this->pc_ + 1, "gc index");
- opcode = static_cast<WasmOpcode>(opcode << 8 | gc_index);
- TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
- WasmOpcodes::OpcodeName(opcode));
- len = DecodeGCOpcode(opcode);
- break;
+ Value index = Pop(0, kWasmI32);
+ ArgVector args = PopArgs(imm.sig);
+ CALL_INTERFACE_IF_REACHABLE(ReturnCallIndirect, index, imm,
+ args.begin());
+ EndControl();
+ break;
+ }
+ case kNumericPrefix: {
+ ++len;
+ byte numeric_index =
+ this->template read_u8<validate>(this->pc_ + 1, "numeric index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | numeric_index);
+ if (full_opcode == kExprTableGrow || full_opcode == kExprTableSize ||
+ full_opcode == kExprTableFill) {
+ CHECK_PROTOTYPE_OPCODE(reftypes);
+ } else if (full_opcode >= kExprMemoryInit) {
+ CHECK_PROTOTYPE_OPCODE(bulk_memory);
}
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeNumericOpcode(full_opcode);
+ break;
+ }
+ case kSimdPrefix: {
+ CHECK_PROTOTYPE_OPCODE(simd);
+ uint32_t length = 0;
+ WasmOpcode full_opcode =
+ this->template read_prefixed_opcode<validate>(this->pc_, &length);
+ if (!VALIDATE(this->ok())) break;
+ len += length;
+
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeSimdOpcode(full_opcode, length);
+ break;
+ }
+ case kAtomicPrefix: {
+ CHECK_PROTOTYPE_OPCODE(threads);
+ len++;
+ byte atomic_index =
+ this->template read_u8<validate>(this->pc_ + 1, "atomic index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | atomic_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len += DecodeAtomicOpcode(full_opcode);
+ break;
+ }
+ case kGCPrefix: {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ byte gc_index =
+ this->template read_u8<validate>(this->pc_ + 1, "gc index");
+ WasmOpcode full_opcode =
+ static_cast<WasmOpcode>(opcode << 8 | gc_index);
+ TRACE_PART(TRACE_INST_FORMAT, startrel(this->pc_),
+ WasmOpcodes::OpcodeName(full_opcode));
+ len = DecodeGCOpcode(full_opcode);
+ break;
+ }
// Note that prototype opcodes are not handled in the fastpath
// above this switch, to avoid checking a feature flag.
#define SIMPLE_PROTOTYPE_CASE(name, opc, sig) \
case kExpr##name: /* fallthrough */
- FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
+ FOREACH_SIMPLE_PROTOTYPE_OPCODE(SIMPLE_PROTOTYPE_CASE)
#undef SIMPLE_PROTOTYPE_CASE
- BuildSimplePrototypeOperator(opcode);
- break;
- default: {
- // Deal with special asmjs opcodes.
- if (this->module_ != nullptr && is_asmjs_module(this->module_)) {
- const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
- if (sig) {
- BuildSimpleOperator(opcode, sig);
- }
- } else {
- this->error("Invalid opcode");
- return;
+ BuildSimplePrototypeOperator(opcode);
+ break;
+ default: {
+ // Deal with special asmjs opcodes.
+ if (is_asmjs_module(this->module_)) {
+ const FunctionSig* sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) {
+ BuildSimpleOperator(opcode, sig);
}
+ } else {
+ this->error("Invalid opcode");
+ return 0;
}
}
+ }
#if DEBUG
- if (FLAG_trace_wasm_decoder) {
- TRACE_PART(" ");
- for (Control& c : control_) {
- switch (c.kind) {
- case kControlIf:
- TRACE_PART("I");
- break;
- case kControlBlock:
- TRACE_PART("B");
- break;
- case kControlLoop:
- TRACE_PART("L");
- break;
- case kControlTry:
- TRACE_PART("T");
- break;
- default:
- break;
- }
- if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
- TRACE_PART("%u", c.end_merge.arity);
- if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
+ if (FLAG_trace_wasm_decoder) {
+ TRACE_PART(" ");
+ for (Control& c : control_) {
+ switch (c.kind) {
+ case kControlIf:
+ TRACE_PART("I");
+ break;
+ case kControlBlock:
+ TRACE_PART("B");
+ break;
+ case kControlLoop:
+ TRACE_PART("L");
+ break;
+ case kControlTry:
+ TRACE_PART("T");
+ break;
+ case kControlIfElse:
+ case kControlTryCatch:
+ case kControlLet: // TODO(7748): Implement
+ break;
}
- TRACE_PART(" | ");
- for (size_t i = 0; i < stack_.size(); ++i) {
- auto& val = stack_[i];
- WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = this->template read_prefixed_opcode<Decoder::kNoValidate>(
- val.pc);
+ if (c.start_merge.arity) TRACE_PART("%u-", c.start_merge.arity);
+ TRACE_PART("%u", c.end_merge.arity);
+ if (!c.reachable()) TRACE_PART("%c", c.unreachable() ? '*' : '#');
+ }
+ TRACE_PART(" | ");
+ for (size_t i = 0; i < stack_.size(); ++i) {
+ Value& val = stack_[i];
+ WasmOpcode val_opcode = static_cast<WasmOpcode>(*val.pc);
+ if (WasmOpcodes::IsPrefixOpcode(val_opcode)) {
+ val_opcode =
+ this->template read_prefixed_opcode<Decoder::kNoValidate>(val.pc);
+ }
+ TRACE_PART(" %c@%d:%s", val.type.short_name(),
+ static_cast<int>(val.pc - this->start_),
+ WasmOpcodes::OpcodeName(val_opcode));
+ // If the decoder failed, don't try to decode the immediates, as this
+ // can trigger a DCHECK failure.
+ if (this->failed()) continue;
+ switch (val_opcode) {
+ case kExprI32Const: {
+ ImmI32Immediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%d]", imm.value);
+ break;
}
- TRACE_PART(" %c@%d:%s", val.type.short_name(),
- static_cast<int>(val.pc - this->start_),
- WasmOpcodes::OpcodeName(opcode));
- // If the decoder failed, don't try to decode the immediates, as this
- // can trigger a DCHECK failure.
- if (this->failed()) continue;
- switch (opcode) {
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%d]", imm.value);
- break;
- }
- case kExprLocalGet:
- case kExprLocalSet:
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%u]", imm.index);
- break;
- }
- case kExprGlobalGet:
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
- TRACE_PART("[%u]", imm.index);
- break;
- }
- default:
- break;
+ case kExprLocalGet:
+ case kExprLocalSet:
+ case kExprLocalTee: {
+ LocalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%u]", imm.index);
+ break;
+ }
+ case kExprGlobalGet:
+ case kExprGlobalSet: {
+ GlobalIndexImmediate<Decoder::kNoValidate> imm(this, val.pc);
+ TRACE_PART("[%u]", imm.index);
+ break;
}
+ default:
+ break;
}
}
+ }
#endif
+ return len;
+ }
+
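+  // Opcode dispatch: a 256-entry table of handlers indexed by the first
+  // opcode byte, built at compile time by instantiating DecodeOp<opcode>
+  // for every byte value.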
+ using OpcodeHandler = int (*)(WasmFullDecoder*);
+
+ template <size_t idx>
+ struct GetOpcodeHandlerTableEntry
+ : public std::integral_constant<
+ OpcodeHandler,
+ &WasmFullDecoder::DecodeOp<static_cast<WasmOpcode>(idx)>> {};
+
+ OpcodeHandler GetOpcodeHandler(uint8_t opcode) {
+ static constexpr std::array<OpcodeHandler, 256> kOpcodeHandlers =
+ base::make_array<256, GetOpcodeHandlerTableEntry>();
+ return kOpcodeHandlers[opcode];
+ }
+
+ void DecodeFunctionBody() {
+ TRACE("wasm-decode %p...%p (module+%u, %d bytes)\n", this->start(),
+ this->end(), this->pc_offset(),
+ static_cast<int>(this->end() - this->start()));
+
+ // Set up initial function block.
+ {
+ Control* c = PushControl(kControlBlock);
+ InitMerge(&c->start_merge, 0, [](uint32_t) -> Value { UNREACHABLE(); });
+ InitMerge(&c->end_merge,
+ static_cast<uint32_t>(this->sig_->return_count()),
+ [&](uint32_t i) {
+ return Value{this->pc_, this->sig_->GetReturn(i)};
+ });
+ CALL_INTERFACE(StartFunctionBody, c);
+ }
+
+ // Decode the function body.
+ while (this->pc_ < this->end_) {
+ uint8_t first_byte = *this->pc_;
+ CALL_INTERFACE_IF_REACHABLE(NextInstruction,
+ static_cast<WasmOpcode>(first_byte));
+ OpcodeHandler handler = GetOpcodeHandler(first_byte);
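+      // Each handler decodes one complete instruction and returns its length.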
+ int len = (*handler)(this);
this->pc_ += len;
- } // end decode loop
+ }
+
if (!VALIDATE(this->pc_ == this->end_) && this->ok()) {
this->error("Beyond end of code");
}
@@ -2728,13 +2925,14 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void EndControl() {
DCHECK(!control_.empty());
- auto* current = &control_.back();
+ Control* current = &control_.back();
stack_.erase(stack_.begin() + current->stack_depth, stack_.end());
CALL_INTERFACE_IF_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
+ current_code_reachable_ = false;
}
- template<typename func>
+ template <typename func>
void InitMerge(Merge<Value>* merge, uint32_t arity, func get_val) {
merge->arity = arity;
if (arity == 1) {
@@ -2771,7 +2969,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
int count = static_cast<int>(type->field_count());
ArgVector args(count);
for (int i = count - 1; i >= 0; i--) {
- args[i] = Pop(i, type->field(i));
+ args[i] = Pop(i, type->field(i).Unpacked());
+ }
+ return args;
+ }
+
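+  // Pops |arg_types| values in reverse order; |base_index| only offsets the
+  // operand index reported in type-error messages.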
+ V8_INLINE ArgVector PopArgs(uint32_t base_index,
+ Vector<ValueType> arg_types) {
+ ArgVector args(arg_types.size());
+ for (int i = static_cast<int>(arg_types.size()) - 1; i >= 0; i--) {
+ args[i] = Pop(base_index + i, arg_types[i]);
}
return args;
}
@@ -2781,10 +2988,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
- Control* PushControl(ControlKind kind) {
+ Control* PushControl(ControlKind kind, uint32_t locals_count = 0) {
Reachability reachability =
control_.empty() ? kReachable : control_.back().innerReachability();
- control_.emplace_back(kind, stack_size(), this->pc_, reachability);
+ control_.emplace_back(kind, locals_count, stack_size(), this->pc_,
+ reachability);
+ current_code_reachable_ = this->ok() && reachability == kReachable;
return &control_.back();
}
@@ -2800,17 +3009,16 @@ class WasmFullDecoder : public WasmDecoder<validate> {
control_.pop_back();
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
- if (!parent_reached && control_.back().reachable()) {
- control_.back().reachability = kSpecOnlyReachable;
- }
+ if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
+ current_code_reachable_ = control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 0) {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
type.size_log_2());
- auto index = Pop(0, kWasmI32);
- auto* result = Push(type.value_type());
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(type.value_type());
CALL_INTERFACE_IF_REACHABLE(LoadMem, type, imm, index, result);
return imm.length;
}
@@ -2823,8 +3031,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
transform == LoadTransformationKind::kExtend ? 3 : type.size_log_2();
MemoryAccessImmediate<validate> imm(this, this->pc_ + opcode_length,
max_alignment);
- auto index = Pop(0, kWasmI32);
- auto* result = Push(kWasmS128);
+ Value index = Pop(0, kWasmI32);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(LoadTransform, type, transform, imm, index,
result);
return imm.length;
@@ -2834,8 +3042,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!CheckHasMemory()) return 0;
MemoryAccessImmediate<validate> imm(this, this->pc_ + prefix_len,
store.size_log_2());
- auto value = Pop(1, store.value_type());
- auto index = Pop(0, kWasmI32);
+ Value value = Pop(1, store.value_type());
+ Value index = Pop(0, kWasmI32);
CALL_INTERFACE_IF_REACHABLE(StoreMem, store, imm, index, value);
return imm.length;
}
@@ -2850,7 +3058,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
- auto* merge = control_at(target)->br_merge();
+ Merge<Value>* merge = control_at(target)->br_merge();
int br_arity = merge->arity;
std::vector<ValueType> result(br_arity);
for (int i = 0; i < br_arity; ++i) {
@@ -2861,7 +3069,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
uint32_t target, const byte* pos, int index) {
- auto* merge = control_at(target)->br_merge();
+ Merge<Value>* merge = control_at(target)->br_merge();
int br_arity = merge->arity;
// First we check if the arities match.
if (br_arity != static_cast<int>(result_types->size())) {
@@ -2873,18 +3081,27 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
for (int i = 0; i < br_arity; ++i) {
- if (this->enabled_.has_anyref()) {
+ if (this->enabled_.has_reftypes()) {
// The expected type is the biggest common sub type of all targets.
+ ValueType type = (*result_types)[i];
(*result_types)[i] =
- ValueType::CommonSubType((*result_types)[i], (*merge)[i].type);
+ CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
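+          // A bottom result means the br_table targets have no common subtype.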
+ if ((*result_types)[i] == kWasmBottom) {
+ this->errorf(pos,
+ "inconsistent type in br_table target %u (previous "
+ "was %s, this one is %s)",
+ index, type.type_name().c_str(),
+ (*merge)[i].type.type_name().c_str());
+ return false;
+ }
} else {
// All target must have the same signature.
if ((*result_types)[i] != (*merge)[i].type) {
this->errorf(pos,
"inconsistent type in br_table target %u (previous "
"was %s, this one is %s)",
- index, (*result_types)[i].type_name(),
- (*merge)[i].type.type_name());
+ index, (*result_types)[i].type_name().c_str(),
+ (*merge)[i].type.type_name().c_str());
return false;
}
}
@@ -2909,10 +3126,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// Type-check the topmost br_arity values on the stack.
for (int i = 0; i < br_arity; ++i) {
Value& val = stack_values[i];
- if (!val.type.IsSubTypeOf(result_types[i])) {
+ if (!IsSubtypeOf(val.type, result_types[i], this->module_)) {
this->errorf(this->pc_,
"type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].type_name(), val.type.type_name());
+ result_types[i].type_name().c_str(),
+ val.type.type_name().c_str());
return false;
}
}
@@ -2928,7 +3146,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
SimdLaneImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, opcode, imm)) {
Value inputs[] = {Pop(0, kWasmS128)};
- auto* result = Push(type);
+ Value* result = Push(type);
CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
result);
}
@@ -2943,7 +3161,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
UnreachableValue(this->pc_)};
inputs[1] = Pop(1, type);
inputs[0] = Pop(0, kWasmS128);
- auto* result = Push(kWasmS128);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(SimdLaneOp, opcode, imm, ArrayVector(inputs),
result);
}
@@ -2953,9 +3171,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
uint32_t Simd8x16ShuffleOp(uint32_t opcode_length) {
Simd8x16ShuffleImmediate<validate> imm(this, this->pc_, opcode_length);
if (this->Validate(this->pc_, imm)) {
- auto input1 = Pop(1, kWasmS128);
- auto input0 = Pop(0, kWasmS128);
- auto* result = Push(kWasmS128);
+ Value input1 = Pop(1, kWasmS128);
+ Value input0 = Pop(0, kWasmS128);
+ Value* result = Push(kWasmS128);
CALL_INTERFACE_IF_REACHABLE(Simd8x16ShuffleOp, imm, input0, input1,
result);
}
@@ -3075,8 +3293,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("invalid simd opcode");
break;
}
- auto args = PopArgs(sig);
- auto* results =
+ ArgVector args = PopArgs(sig);
+ Value* results =
sig->return_count() == 0 ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(SimdOp, opcode, VectorOf(args), results);
}
@@ -3091,29 +3309,66 @@ class WasmFullDecoder : public WasmDecoder<validate> {
StructIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_, imm)) break;
- auto args = PopArgs(imm.struct_type);
- auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ ArgVector args = PopArgs(imm.struct_type);
+ Value* value = Push(
+ ValueType::Ref(static_cast<HeapType>(imm.index), kNonNullable));
CALL_INTERFACE_IF_REACHABLE(StructNew, imm, args.begin(), value);
break;
}
case kExprStructGet: {
FieldIndexImmediate<validate> field(this, this->pc_ + len);
if (!this->Validate(this->pc_ + len, field)) break;
+ ValueType field_type =
+ field.struct_index.struct_type->field(field.index);
+ if (field_type.is_packed()) {
+ this->error(this->pc_,
+ "struct.get used with a field of packed type. "
+ "Use struct.get_s or struct.get_u instead.");
+ break;
+ }
len += field.length;
- auto struct_obj =
- Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
- auto* value = Push(field.struct_index.struct_type->field(field.index));
- CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, value);
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
+ Value* value = Push(field_type);
+ CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field, true, value);
+ break;
+ }
+ case kExprStructGetU:
+ case kExprStructGetS: {
+ FieldIndexImmediate<validate> field(this, this->pc_ + len);
+ if (!this->Validate(this->pc_ + len, field)) break;
+ len += field.length;
+ ValueType field_type =
+ field.struct_index.struct_type->field(field.index);
+ if (!field_type.is_packed()) {
+ this->errorf(this->pc_,
+ "%s is only valid for packed struct fields. "
+ "Use struct.get instead.",
+ WasmOpcodes::OpcodeName(opcode));
+ break;
+ }
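+        // Packed fields are loaded as their unpacked type; struct.get_s
+        // selects a signed load, struct.get_u an unsigned one.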
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
+ Value* value = Push(field_type.Unpacked());
+ CALL_INTERFACE_IF_REACHABLE(StructGet, struct_obj, field,
+ opcode == kExprStructGetS, value);
break;
}
case kExprStructSet: {
FieldIndexImmediate<validate> field(this, this->pc_ + len);
if (!this->Validate(this->pc_ + len, field)) break;
len += field.length;
- auto field_value = Pop(
- 0, ValueType(field.struct_index.struct_type->field(field.index)));
- auto struct_obj =
- Pop(0, ValueType(ValueType::kOptRef, field.struct_index.index));
+ const StructType* struct_type = field.struct_index.struct_type;
+ if (!struct_type->mutability(field.index)) {
+ this->error(this->pc_, "setting immutable struct field");
+ break;
+ }
+ Value field_value = Pop(1, struct_type->field(field.index).Unpacked());
+ Value struct_obj = Pop(
+ 0, ValueType::Ref(static_cast<HeapType>(field.struct_index.index),
+ kNullable));
CALL_INTERFACE_IF_REACHABLE(StructSet, struct_obj, field, field_value);
break;
}
@@ -3121,31 +3376,66 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_, imm)) break;
- auto length = Pop(0, kWasmI32);
- auto initial_value = Pop(0, imm.array_type->element_type());
- auto* value = Push(ValueType(ValueType::kRef, imm.index));
+ Value length = Pop(1, kWasmI32);
+ Value initial_value = Pop(0, imm.array_type->element_type().Unpacked());
+ Value* value = Push(
+ ValueType::Ref(static_cast<HeapType>(imm.index), kNonNullable));
CALL_INTERFACE_IF_REACHABLE(ArrayNew, imm, length, initial_value,
value);
break;
}
+ case kExprArrayGetS:
+ case kExprArrayGetU: {
+ ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ if (!imm.array_type->element_type().is_packed()) {
+ this->errorf(this->pc_,
+ "%s is only valid for packed arrays. "
+                       "Use array.get instead.",
+ WasmOpcodes::OpcodeName(opcode));
+ break;
+ }
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(imm.array_type->element_type().Unpacked());
+ // TODO(7748): Optimize this when array_obj is non-nullable ref.
+ CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index,
+ opcode == kExprArrayGetS, value);
+ break;
+ }
case kExprArrayGet: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto index = Pop(0, kWasmI32);
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
- auto* value = Push(imm.array_type->element_type());
+ if (imm.array_type->element_type().is_packed()) {
+ this->error(this->pc_,
+                      "array.get used with an element of packed type. "
+ "Use array.get_s or array.get_u instead.");
+ break;
+ }
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(imm.array_type->element_type());
// TODO(7748): Optimize this when array_obj is non-nullable ref.
- CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, value);
+ CALL_INTERFACE_IF_REACHABLE(ArrayGet, array_obj, imm, index, true,
+ value);
break;
}
case kExprArraySet: {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto value = Pop(0, imm.array_type->element_type());
- auto index = Pop(0, kWasmI32);
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
+ if (!imm.array_type->mutability()) {
+ this->error(this->pc_, "setting element of immutable array");
+ break;
+ }
+ Value value = Pop(2, imm.array_type->element_type().Unpacked());
+ Value index = Pop(1, kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
// TODO(7748): Optimize this when array_obj is non-nullable ref.
CALL_INTERFACE_IF_REACHABLE(ArraySet, array_obj, imm, index, value);
break;
@@ -3154,11 +3444,22 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArrayIndexImmediate<validate> imm(this, this->pc_ + len);
len += imm.length;
if (!this->Validate(this->pc_ + len, imm)) break;
- auto array_obj = Pop(0, ValueType(ValueType::kOptRef, imm.index));
- auto* value = Push(kWasmI32);
+ Value array_obj =
+ Pop(0, ValueType::Ref(static_cast<HeapType>(imm.index), kNullable));
+ Value* value = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(ArrayLen, array_obj, value);
break;
}
+ case kExprRttCanon: {
+ // TODO(7748): Introduce HeapTypeImmediate and use that here.
+ TypeIndexImmediate<validate> imm(this, this->pc_ + len);
+ len += imm.length;
+ if (!this->Validate(this->pc_ + len, imm)) break;
+ Value* value =
+ Push(ValueType::Rtt(static_cast<HeapType>(imm.index), 1));
+ CALL_INTERFACE_IF_REACHABLE(RttCanon, imm, value);
+ break;
+ }
default:
this->error("invalid gc opcode");
return 0;
@@ -3209,8 +3510,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryAccessImmediate<validate> imm(
this, this->pc_ + 1, ElementSizeLog2Of(memtype.representation()));
len += imm.length;
- auto args = PopArgs(sig);
- auto result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
+ ArgVector args = PopArgs(sig);
+ Value* result = ret_type == kWasmStmt ? nullptr : Push(GetReturnType(sig));
CALL_INTERFACE_IF_REACHABLE(AtomicOp, opcode, VectorOf(args), imm, result);
return len;
}
@@ -3234,9 +3535,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto src = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value src = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryInit, imm, dst, src, size);
break;
}
@@ -3251,9 +3552,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto src = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value src = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryCopy, imm, dst, src, size);
break;
}
@@ -3261,9 +3562,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
MemoryIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm)) break;
len += imm.length;
- auto size = Pop(2, sig->GetParam(2));
- auto value = Pop(1, sig->GetParam(1));
- auto dst = Pop(0, sig->GetParam(0));
+ Value size = Pop(2, sig->GetParam(2));
+ Value value = Pop(1, sig->GetParam(1));
+ Value dst = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(MemoryFill, imm, dst, value, size);
break;
}
@@ -3271,7 +3572,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableInitImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto args = PopArgs(sig);
+ ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableInit, imm, VectorOf(args));
break;
}
@@ -3286,7 +3587,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableCopyImmediate<validate> imm(this, this->pc_);
if (!this->Validate(imm)) break;
len += imm.length;
- auto args = PopArgs(sig);
+ ArgVector args = PopArgs(sig);
CALL_INTERFACE_IF_REACHABLE(TableCopy, imm, VectorOf(args));
break;
}
@@ -3294,9 +3595,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto delta = Pop(1, sig->GetParam(1));
- auto value = Pop(0, this->module_->tables[imm.index].type);
- auto* result = Push(kWasmI32);
+ Value delta = Pop(1, sig->GetParam(1));
+ Value value = Pop(0, this->module_->tables[imm.index].type);
+ Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableGrow, imm, value, delta, result);
break;
}
@@ -3304,7 +3605,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto* result = Push(kWasmI32);
+ Value* result = Push(kWasmI32);
CALL_INTERFACE_IF_REACHABLE(TableSize, imm, result);
break;
}
@@ -3312,9 +3613,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
TableIndexImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_, imm)) break;
len += imm.length;
- auto count = Pop(2, sig->GetParam(2));
- auto value = Pop(1, this->module_->tables[imm.index].type);
- auto start = Pop(0, sig->GetParam(0));
+ Value count = Pop(2, sig->GetParam(2));
+ Value value = Pop(1, this->module_->tables[imm.index].type);
+ Value start = Pop(0, sig->GetParam(0));
CALL_INTERFACE_IF_REACHABLE(TableFill, imm, start, value, count);
break;
}
@@ -3330,6 +3631,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void DoReturn() {
size_t return_count = this->sig_->return_count();
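+    // Returning more than one value records use of the multi-value feature.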
+ if (return_count > 1) {
+ this->detected_->Add(kFeature_mv);
+ }
DCHECK_GE(stack_.size(), return_count);
Vector<Value> return_values =
return_count == 0
@@ -3370,12 +3674,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
V8_INLINE Value Pop(int index, ValueType expected) {
- auto val = Pop();
- if (!VALIDATE(val.type.IsSubTypeOf(expected) || val.type == kWasmBottom ||
- expected == kWasmBottom)) {
+ Value val = Pop();
+ if (!VALIDATE(IsSubtypeOf(val.type, expected, this->module_) ||
+ val.type == kWasmBottom || expected == kWasmBottom)) {
this->errorf(val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(this->pc_), index, expected.type_name(),
- SafeOpcodeNameAt(val.pc), val.type.type_name());
+ SafeOpcodeNameAt(this->pc_), index,
+ expected.type_name().c_str(), SafeOpcodeNameAt(val.pc),
+ val.type.type_name().c_str());
}
return val;
}
@@ -3391,7 +3696,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
return UnreachableValue(this->pc_);
}
- auto val = stack_.back();
+ Value val = stack_.back();
stack_.pop_back();
return val;
}
@@ -3435,9 +3740,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < merge->arity; ++i) {
Value& val = stack_values[i];
Value& old = (*merge)[i];
- if (!val.type.IsSubTypeOf(old.type)) {
+ if (!IsSubtypeOf(val.type, old.type, this->module_)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, old.type.type_name(), val.type.type_name());
+ i, old.type.type_name().c_str(),
+ val.type.type_name().c_str());
return false;
}
}
@@ -3452,9 +3758,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
for (uint32_t i = 0; i < c->start_merge.arity; ++i) {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
- if (!start.type.IsSubTypeOf(end.type)) {
+ if (!IsSubtypeOf(start.type, end.type, this->module_)) {
this->errorf(this->pc_, "type error in merge[%u] (expected %s, got %s)",
- i, end.type.type_name(), start.type.type_name());
+ i, end.type.type_name().c_str(),
+ start.type.type_name().c_str());
return false;
}
}
@@ -3463,7 +3770,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
bool TypeCheckFallThru() {
- static_assert(validate, "Call this function only whithin VALIDATE");
+ static_assert(validate, "Call this function only within VALIDATE");
Control& c = control_.back();
if (V8_LIKELY(c.reachable())) {
uint32_t expected = c.end_merge.arity;
@@ -3554,12 +3861,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// This line requires num_returns > 0.
Value* stack_values = &*(stack_.end() - num_returns);
for (int i = 0; i < num_returns; ++i) {
- auto& val = stack_values[i];
+ Value& val = stack_values[i];
ValueType expected_type = this->sig_->GetReturn(i);
- if (!val.type.IsSubTypeOf(expected_type)) {
- this->errorf(this->pc_,
- "type error in return[%u] (expected %s, got %s)", i,
- expected_type.type_name(), val.type.type_name());
+ if (!IsSubtypeOf(val.type, expected_type, this->module_)) {
+ this->errorf(
+ this->pc_, "type error in return[%u] (expected %s, got %s)", i,
+ expected_type.type_name().c_str(), val.type.type_name().c_str());
return false;
}
}
@@ -3568,14 +3875,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
void onFirstError() override {
this->end_ = this->pc_; // Terminate decoding loop.
+ this->current_code_reachable_ = false;
TRACE(" !%s\n", this->error_.message().c_str());
CALL_INTERFACE(OnFirstError);
}
void BuildSimplePrototypeOperator(WasmOpcode opcode) {
- if (opcode == kExprRefIsNull) {
- RET_ON_PROTOTYPE_OPCODE(anyref);
- } else if (opcode == kExprRefEq) {
+ if (opcode == kExprRefEq) {
RET_ON_PROTOTYPE_OPCODE(gc);
}
const FunctionSig* sig = WasmOpcodes::Signature(opcode);
@@ -3583,39 +3889,28 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
void BuildSimpleOperator(WasmOpcode opcode, const FunctionSig* sig) {
- switch (sig->parameter_count()) {
- case 1: {
- auto val = Pop(0, sig->GetParam(0));
- auto* ret =
- sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
- break;
- }
- case 2: {
- auto rval = Pop(1, sig->GetParam(1));
- auto lval = Pop(0, sig->GetParam(0));
- auto* ret =
- sig->return_count() == 0 ? nullptr : Push(sig->GetReturn(0));
- CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
- break;
- }
- default:
- UNREACHABLE();
+ DCHECK_GE(1, sig->return_count());
+ ValueType ret = sig->return_count() == 0 ? kWasmStmt : sig->GetReturn(0);
+ if (sig->parameter_count() == 1) {
+ BuildSimpleOperator(opcode, ret, sig->GetParam(0));
+ } else {
+ DCHECK_EQ(2, sig->parameter_count());
+ BuildSimpleOperator(opcode, ret, sig->GetParam(0), sig->GetParam(1));
}
}
void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType arg_type) {
- auto val = Pop(0, arg_type);
- auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ Value val = Pop(0, arg_type);
+ Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
CALL_INTERFACE_IF_REACHABLE(UnOp, opcode, val, ret);
}
void BuildSimpleOperator(WasmOpcode opcode, ValueType return_type,
ValueType lhs_type, ValueType rhs_type) {
- auto rval = Pop(1, rhs_type);
- auto lval = Pop(0, lhs_type);
- auto* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
+ Value rval = Pop(1, rhs_type);
+ Value lval = Pop(0, lhs_type);
+ Value* ret = return_type == kWasmStmt ? nullptr : Push(return_type);
CALL_INTERFACE_IF_REACHABLE(BinOp, opcode, lval, rval, ret);
}
diff --git a/chromium/v8/src/wasm/function-body-decoder.cc b/chromium/v8/src/wasm/function-body-decoder.cc
index 8b2b027b13a..a69d4166959 100644
--- a/chromium/v8/src/wasm/function-body-decoder.cc
+++ b/chromium/v8/src/wasm/function-body-decoder.cc
@@ -13,7 +13,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -21,14 +21,24 @@ namespace wasm {
bool DecodeLocalDecls(const WasmFeatures& enabled, BodyLocalDecls* decls,
const byte* start, const byte* end) {
- Decoder decoder(start, end);
- if (WasmDecoder<Decoder::kValidate>::DecodeLocals(enabled, &decoder, nullptr,
- &decls->type_list)) {
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kValidate> decoder(nullptr, enabled, &no_features,
+ nullptr, start, end, 0);
+  // The decoded local types need to be inserted into &decls->type_list,
+  // so we pass a pointer to it to local_types_, which will be updated
+ // in DecodeLocals.
+ decoder.local_types_ = &decls->type_list;
+ uint32_t length;
+ if (decoder.DecodeLocals(
+ decoder.pc(), &length,
+ static_cast<uint32_t>(decoder.local_types_->size()))) {
DCHECK(decoder.ok());
- decls->encoded_size = decoder.pc_offset();
+ decls->encoded_size = length;
return true;
+ } else {
+ decls->encoded_size = 0;
+ return false;
}
- return false;
}
BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
@@ -54,7 +64,9 @@ DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- Decoder decoder(pc, end);
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kNoValidate> decoder(nullptr, no_features, &no_features,
+ nullptr, pc, end, 0);
return WasmDecoder<Decoder::kNoValidate>::OpcodeLength(&decoder, pc);
}
@@ -164,8 +176,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
unsigned offset = 1;
WasmOpcode opcode = i.current();
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- os << PrefixName(opcode) << ", ";
+ WasmOpcode prefix = kExprUnreachable;
+ bool has_prefix = WasmOpcodes::IsPrefixOpcode(opcode);
+ if (has_prefix) {
+ prefix = i.current();
opcode = i.prefixed_opcode();
offset = 2;
}
@@ -181,6 +195,10 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
" ";
os.write(padding, num_whitespaces);
+ if (has_prefix) {
+ os << PrefixName(prefix) << ", ";
+ }
+
os << RawOpcodeName(opcode) << ",";
if (opcode == kExprLoop || opcode == kExprIf || opcode == kExprBlock ||
@@ -283,7 +301,9 @@ bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
- Decoder decoder(start, end);
+ WasmFeatures no_features = WasmFeatures::None();
+ WasmDecoder<Decoder::kValidate> decoder(nullptr, no_features, &no_features,
+ nullptr, start, end, 0);
return WasmDecoder<Decoder::kValidate>::AnalyzeLoopAssignment(
&decoder, start, static_cast<uint32_t>(num_locals), zone);
}
diff --git a/chromium/v8/src/wasm/function-body-decoder.h b/chromium/v8/src/wasm/function-body-decoder.h
index 4fab50817ca..2e14d844fa6 100644
--- a/chromium/v8/src/wasm/function-body-decoder.h
+++ b/chromium/v8/src/wasm/function-body-decoder.h
@@ -34,6 +34,8 @@ struct FunctionBody {
: sig(sig), offset(offset), start(start), end(end) {}
};
+enum class LoadTransformationKind : uint8_t { kSplat, kExtend };
+
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
const WasmFeatures& enabled,
const WasmModule* module,
@@ -80,9 +82,10 @@ V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
// Be cautious with control opcodes: This function only covers their immediate,
// local stack effect (e.g. BrIf pops 1, Br pops 0). Those opcodes can have
// non-local stack effect though, which are not covered here.
-std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
- const FunctionSig* sig,
- const byte* pc, const byte* end);
+// TODO(clemensb): This is only used by the interpreter; move there.
+V8_EXPORT_PRIVATE std::pair<uint32_t, uint32_t> StackEffect(
+ const WasmModule* module, const FunctionSig* sig, const byte* pc,
+ const byte* end);
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index 6b25520d84f..e268667d287 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -48,7 +48,7 @@ class WasmInstructionBufferImpl {
DCHECK_LT(size(), new_size);
holder_->old_buffer_ = std::move(holder_->buffer_);
- holder_->buffer_ = OwnedVector<uint8_t>::New(new_size);
+ holder_->buffer_ = OwnedVector<uint8_t>::NewForOverwrite(new_size);
return std::make_unique<View>(holder_->buffer_.as_vector(), holder_);
}
@@ -58,7 +58,7 @@ class WasmInstructionBufferImpl {
};
explicit WasmInstructionBufferImpl(size_t size)
- : buffer_(OwnedVector<uint8_t>::New(size)) {}
+ : buffer_(OwnedVector<uint8_t>::NewForOverwrite(size)) {}
std::unique_ptr<AssemblerBuffer> CreateView() {
DCHECK_NOT_NULL(buffer_);
@@ -278,7 +278,8 @@ JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
JSToWasmWrapperCompilationUnit::~JSToWasmWrapperCompilationUnit() = default;
void JSToWasmWrapperCompilationUnit::Execute() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "CompileJSToWasmWrapper");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.CompileJSToWasmWrapper");
CompilationJob::Status status = job_->ExecuteJob(nullptr);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
}
diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h
index c66c748064a..27fd54eb3b2 100644
--- a/chromium/v8/src/wasm/function-compiler.h
+++ b/chromium/v8/src/wasm/function-compiler.h
@@ -31,6 +31,7 @@ struct WasmFunction;
class WasmInstructionBuffer final {
public:
+ WasmInstructionBuffer() = delete;
~WasmInstructionBuffer();
std::unique_ptr<AssemblerBuffer> CreateView();
std::unique_ptr<uint8_t[]> ReleaseBuffer();
@@ -44,7 +45,6 @@ class WasmInstructionBuffer final {
void operator delete(void* ptr) { ::operator delete(ptr); }
private:
- WasmInstructionBuffer() = delete;
DISALLOW_COPY_AND_ASSIGN(WasmInstructionBuffer);
};
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index 5d23dbf1836..dc8cbf20f05 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -16,7 +16,7 @@
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
@@ -28,18 +28,33 @@ namespace {
// as well as the current effect and control dependency in the TF graph.
// It maintains a control state that tracks whether the environment
// is reachable, has reached a control end, or has been merged.
-struct SsaEnv {
+struct SsaEnv : public ZoneObject {
enum State { kControlEnd, kUnreachable, kReached, kMerged };
State state;
TFNode* control;
TFNode* effect;
compiler::WasmInstanceCacheNodes instance_cache;
- TFNode** locals;
+ ZoneVector<TFNode*> locals;
+
+ SsaEnv(Zone* zone, State state, TFNode* control, TFNode* effect,
+ uint32_t locals_size)
+ : state(state), control(control), effect(effect), locals(zone) {
+ if (locals_size > 0) locals.resize(locals_size);
+ }
+
+ SsaEnv(const SsaEnv& other) V8_NOEXCEPT = default;
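+  // The move constructor leaves the moved-from environment in a killed,
+  // unreachable state so it cannot be reused by accident.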
+ SsaEnv(SsaEnv&& other) V8_NOEXCEPT : state(other.state),
+ control(other.control),
+ effect(other.effect),
+ instance_cache(other.instance_cache),
+ locals(std::move(other.locals)) {
+ other.Kill(kUnreachable);
+ }
void Kill(State new_state = kControlEnd) {
state = new_state;
- locals = nullptr;
+ locals.clear();
control = nullptr;
effect = nullptr;
instance_cache = {};
@@ -98,22 +113,14 @@ class WasmGraphBuildingInterface {
: builder_(builder) {}
void StartFunction(FullDecoder* decoder) {
- SsaEnv* ssa_env =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- uint32_t num_locals = decoder->num_locals();
- uint32_t env_count = num_locals;
- size_t size = sizeof(TFNode*) * env_count;
- ssa_env->state = SsaEnv::kReached;
- ssa_env->locals =
- size > 0 ? reinterpret_cast<TFNode**>(decoder->zone()->New(size))
- : nullptr;
-
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// instance parameter.
TFNode* start = builder_->Start(
static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
- ssa_env->effect = start;
- ssa_env->control = start;
+ uint32_t num_locals = decoder->num_locals();
+ SsaEnv* ssa_env = new (decoder->zone())
+ SsaEnv(decoder->zone(), SsaEnv::kReached, start, start, num_locals);
+
// Initialize effect and control before initializing the locals default
// values (which might require instance loads) or loading the context.
builder_->SetEffectControl(start);
@@ -135,6 +142,8 @@ class WasmGraphBuildingInterface {
}
SetEnv(ssa_env);
LoadContextIntoSsa(ssa_env);
+
+ if (FLAG_trace_wasm) BUILD(TraceFunctionEntry, decoder->position());
}
// Reload the instance cache entries into the Ssa Environment.
@@ -174,7 +183,7 @@ class WasmGraphBuildingInterface {
void Try(FullDecoder* decoder, Control* block) {
SsaEnv* outer_env = ssa_env_;
- SsaEnv* catch_env = Split(decoder, outer_env);
+ SsaEnv* catch_env = Split(decoder->zone(), outer_env);
// Mark catch environment as unreachable, since only accessable
// through catch unwinding (i.e. landing pads).
catch_env->state = SsaEnv::kUnreachable;
@@ -192,7 +201,7 @@ class WasmGraphBuildingInterface {
TFNode* if_false = nullptr;
BUILD(BranchNoHint, cond.node, &if_true, &if_false);
SsaEnv* end_env = ssa_env_;
- SsaEnv* false_env = Split(decoder, ssa_env_);
+ SsaEnv* false_env = Split(decoder->zone(), ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
@@ -232,7 +241,8 @@ class WasmGraphBuildingInterface {
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
- auto node = BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
+ TFNode* node =
+ BUILD(Binop, opcode, lhs.node, rhs.node, decoder->position());
if (result) result->node = node;
}
@@ -269,28 +279,41 @@ class WasmGraphBuildingInterface {
void DoReturn(FullDecoder* decoder, Vector<Value> values) {
base::SmallVector<TFNode*, 8> nodes(values.size());
GetNodes(nodes.begin(), values);
+ if (FLAG_trace_wasm) {
+ BUILD(TraceFunctionExit, VectorOf(nodes), decoder->position());
+ }
BUILD(Return, VectorOf(nodes));
}
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
result->node = ssa_env_->locals[imm.index];
}
void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
- if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
result->node = value.node;
- if (!ssa_env_->locals) return; // unreachable
ssa_env_->locals[imm.index] = value.node;
}
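+  // Insert or remove SSA nodes for locals at the front of the locals vector.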
+ void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
+ ZoneVector<TFNode*>* locals = &ssa_env_->locals;
+ locals->insert(locals->begin(), local_values.size(), nullptr);
+ for (uint32_t i = 0; i < local_values.size(); i++) {
+ (*locals)[i] = local_values[i].node;
+ }
+ }
+
+ void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
+ ZoneVector<TFNode*>* locals = &ssa_env_->locals;
+ locals->erase(locals->begin(), locals->begin() + count);
+ }
+
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
result->node = BUILD(GlobalGet, imm.index);
@@ -345,7 +368,7 @@ class WasmGraphBuildingInterface {
void BrIf(FullDecoder* decoder, const Value& cond, uint32_t depth) {
SsaEnv* fenv = ssa_env_;
- SsaEnv* tenv = Split(decoder, fenv);
+ SsaEnv* tenv = Split(decoder->zone(), fenv);
fenv->SetNotMerged();
BUILD(BranchNoHint, cond.node, &tenv->control, &fenv->control);
builder_->SetControl(fenv->control);
@@ -373,7 +396,7 @@ class WasmGraphBuildingInterface {
while (iterator.has_next()) {
uint32_t i = iterator.cur_index();
uint32_t target = iterator.next();
- SetEnv(Split(decoder, copy));
+ SetEnv(Split(decoder->zone(), copy));
builder_->SetControl(i == imm.table_count ? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw));
BrOrRet(decoder, target);
@@ -452,7 +475,7 @@ class WasmGraphBuildingInterface {
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
SsaEnv* non_null_env = ssa_env_;
- SsaEnv* null_env = Split(decoder, non_null_env);
+ SsaEnv* null_env = Split(decoder->zone(), non_null_env);
non_null_env->SetNotMerged();
BUILD(BrOnNull, ref_object.node, &null_env->control,
&non_null_env->control);
@@ -514,7 +537,7 @@ class WasmGraphBuildingInterface {
TFNode* exception_tag = BUILD(LoadExceptionTagFromTable, imm.index);
TFNode* compare = BUILD(ExceptionTagEqual, caught_tag, exception_tag);
BUILD(BranchNoHint, compare, &if_match, &if_no_match);
- SsaEnv* if_no_match_env = Split(decoder, ssa_env_);
+ SsaEnv* if_no_match_env = Split(decoder->zone(), ssa_env_);
SsaEnv* if_match_env = Steal(decoder->zone(), ssa_env_);
if_no_match_env->control = if_no_match;
if_match_env->control = if_match;
@@ -536,6 +559,7 @@ class WasmGraphBuildingInterface {
void Catch(FullDecoder* decoder, Control* block, Value* exception) {
DCHECK(block->is_try_catch());
+ DCHECK_EQ(decoder->control_at(0), block);
current_catch_ = block->previous_catch; // Pop try scope.
@@ -543,7 +567,7 @@ class WasmGraphBuildingInterface {
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
if (!block->try_info->might_throw()) {
- block->reachability = kSpecOnlyReachable;
+ decoder->SetSucceedingCodeDynamicallyUnreachable();
return;
}
@@ -630,14 +654,15 @@ class WasmGraphBuildingInterface {
}
void StructGet(FullDecoder* decoder, const Value& struct_object,
- const FieldIndexImmediate<validate>& field, Value* result) {
+ const FieldIndexImmediate<validate>& field, bool is_signed,
+ Value* result) {
using CheckForNull = compiler::WasmGraphBuilder::CheckForNull;
CheckForNull null_check = struct_object.type.kind() == ValueType::kRef
? CheckForNull::kWithoutNullCheck
: CheckForNull::kWithNullCheck;
result->node =
BUILD(StructGet, struct_object.node, field.struct_index.struct_type,
- field.index, null_check, decoder->position());
+ field.index, null_check, is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
@@ -660,9 +685,9 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
- Value* result) {
+ bool is_signed, Value* result) {
result->node = BUILD(ArrayGet, array_obj.node, imm.array_type, index.node,
- decoder->position());
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
@@ -676,6 +701,11 @@ class WasmGraphBuildingInterface {
result->node = BUILD(ArrayLen, array_obj.node, decoder->position());
}
+ void RttCanon(FullDecoder* decoder, const TypeIndexImmediate<validate>& imm,
+ Value* result) {
+ result->node = BUILD(RttCanon, imm.index);
+ }
+
void PassThrough(FullDecoder* decoder, const Value& from, Value* to) {
to->node = from.node;
}
@@ -755,7 +785,7 @@ class WasmGraphBuildingInterface {
SsaEnv* success_env = Steal(decoder->zone(), ssa_env_);
success_env->control = if_success;
- SsaEnv* exception_env = Split(decoder, success_env);
+ SsaEnv* exception_env = Split(decoder->zone(), success_env);
exception_env->control = if_exception;
exception_env->effect = if_exception;
SetEnv(exception_env);
@@ -777,6 +807,8 @@ class WasmGraphBuildingInterface {
TFNode* DefaultValue(ValueType type) {
switch (type.kind()) {
+ case ValueType::kI8:
+ case ValueType::kI16:
case ValueType::kI32:
return builder_->Int32Constant(0);
case ValueType::kI64:
@@ -787,14 +819,12 @@ class WasmGraphBuildingInterface {
return builder_->Float64Constant(0);
case ValueType::kS128:
return builder_->S128Zero();
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
case ValueType::kOptRef:
- case ValueType::kEqRef:
return builder_->RefNull();
- default:
+ case ValueType::kRtt:
+ case ValueType::kStmt:
+ case ValueType::kBottom:
+ case ValueType::kRef:
UNREACHABLE();
}
}
@@ -920,7 +950,7 @@ class WasmGraphBuildingInterface {
control());
}
- SetEnv(Split(decoder, ssa_env_));
+ SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
return;
}
@@ -934,32 +964,19 @@ class WasmGraphBuildingInterface {
// Conservatively introduce phis for instance cache.
builder_->PrepareInstanceCacheForLoop(&ssa_env_->instance_cache, control());
- SetEnv(Split(decoder, ssa_env_));
+ SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
}
// Create a complete copy of {from}.
- SsaEnv* Split(FullDecoder* decoder, SsaEnv* from) {
+ SsaEnv* Split(Zone* zone, SsaEnv* from) {
DCHECK_NOT_NULL(from);
if (from == ssa_env_) {
ssa_env_->control = control();
ssa_env_->effect = effect();
}
- SsaEnv* result =
- reinterpret_cast<SsaEnv*>(decoder->zone()->New(sizeof(SsaEnv)));
- size_t size = sizeof(TFNode*) * decoder->num_locals();
- result->control = from->control;
- result->effect = from->effect;
-
+ SsaEnv* result = new (zone) SsaEnv(*from);
result->state = SsaEnv::kReached;
- if (size > 0) {
- result->locals = reinterpret_cast<TFNode**>(decoder->zone()->New(size));
- memcpy(result->locals, from->locals, size);
- } else {
- result->locals = nullptr;
- }
- result->instance_cache = from->instance_cache;
-
return result;
}
@@ -971,25 +988,14 @@ class WasmGraphBuildingInterface {
ssa_env_->control = control();
ssa_env_->effect = effect();
}
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
+ SsaEnv* result = new (zone) SsaEnv(std::move(*from));
result->state = SsaEnv::kReached;
- result->locals = from->locals;
- result->control = from->control;
- result->effect = from->effect;
- result->instance_cache = from->instance_cache;
- from->Kill(SsaEnv::kUnreachable);
return result;
}
// Create an unreachable environment.
SsaEnv* UnreachableEnv(Zone* zone) {
- SsaEnv* result = reinterpret_cast<SsaEnv*>(zone->New(sizeof(SsaEnv)));
- result->state = SsaEnv::kUnreachable;
- result->control = nullptr;
- result->effect = nullptr;
- result->locals = nullptr;
- result->instance_cache = {};
- return result;
+ return new (zone) SsaEnv(zone, SsaEnv::kUnreachable, nullptr, nullptr, 0);
}
void DoCall(FullDecoder* decoder, uint32_t table_index, TFNode* index_node,
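
Editor's note: the {Split}/{Steal}/{UnreachableEnv} rewrites above rely on {SsaEnv} becoming a zone-allocated object with copy and move constructors, which is what lets the manual size computation and {memcpy} of locals go away. A minimal sketch of the shape these call sites assume (member names inferred from the hunks above; not the verbatim V8 declaration):

    struct SsaEnv : public ZoneObject {
      enum State { kUnreachable, kReached, kMerged };

      State state;
      TFNode* control;
      TFNode* effect;
      compiler::WasmInstanceCacheNodes instance_cache;
      ZoneVector<TFNode*> locals;

      SsaEnv(Zone* zone, State state, TFNode* control, TFNode* effect,
             uint32_t locals_size)
          : state(state),
            control(control),
            effect(effect),
            locals(locals_size, zone) {}

      // Copying duplicates the locals vector (used by Split).
      SsaEnv(const SsaEnv& other) V8_NOEXCEPT = default;
      // Moving steals the locals vector and kills the source (used by Steal).
      SsaEnv(SsaEnv&& other) V8_NOEXCEPT
          : state(other.state),
            control(other.control),
            effect(other.effect),
            instance_cache(other.instance_cache),
            locals(std::move(other.locals)) {
        other.state = kUnreachable;
        other.control = nullptr;
        other.effect = nullptr;
      }
    };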
diff --git a/chromium/v8/src/wasm/local-decl-encoder.cc b/chromium/v8/src/wasm/local-decl-encoder.cc
index 257f384bef3..aea6e573e9b 100644
--- a/chromium/v8/src/wasm/local-decl-encoder.cc
+++ b/chromium/v8/src/wasm/local-decl-encoder.cc
@@ -28,11 +28,17 @@ size_t LocalDeclEncoder::Emit(byte* buffer) const {
byte* pos = buffer;
LEBHelper::write_u32v(&pos, static_cast<uint32_t>(local_decls.size()));
for (auto& local_decl : local_decls) {
- LEBHelper::write_u32v(&pos, local_decl.first);
- *pos = local_decl.second.value_type_code();
+ uint32_t locals_count = local_decl.first;
+ ValueType locals_type = local_decl.second;
+ LEBHelper::write_u32v(&pos, locals_count);
+ *pos = locals_type.value_type_code();
++pos;
- if (local_decl.second.has_immediate()) {
- LEBHelper::write_u32v(&pos, local_decl.second.ref_index());
+ if (locals_type.has_depth()) {
+ *pos = locals_type.depth();
+ ++pos;
+ }
+ if (locals_type.encoding_needs_heap_type()) {
+ LEBHelper::write_u32v(&pos, locals_type.heap_type_code());
}
}
DCHECK_EQ(Size(), pos - buffer);
@@ -56,11 +62,12 @@ uint32_t LocalDeclEncoder::AddLocals(uint32_t count, ValueType type) {
size_t LocalDeclEncoder::Size() const {
size_t size = LEBHelper::sizeof_u32v(local_decls.size());
for (auto p : local_decls) {
- size +=
- LEBHelper::sizeof_u32v(p.first) + // number of locals
- 1 + // Opcode
- (p.second.has_immediate() ? LEBHelper::sizeof_u32v(p.second.ref_index())
- : 0); // immediate
+ size += LEBHelper::sizeof_u32v(p.first) + // number of locals
+ 1 + // Opcode
+ (p.second.has_depth() ? 1 : 0) + // Inheritance depth
+ (p.second.encoding_needs_heap_type()
+ ? LEBHelper::sizeof_u32v(p.second.heap_type_code())
+ : 0); // ref. index
}
return size;
}
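
Editor's note: the {Emit}/{Size} changes above extend each local-declaration entry to a LEB-encoded count, one type opcode byte, an optional one-byte inheritance depth, and an optional LEB heap-type index. A minimal usage sketch under those assumptions (the zone setup is hypothetical; the encoder calls are the API changed above):

    AccountingAllocator allocator;
    Zone zone(&allocator, "local decls");
    LocalDeclEncoder locals(&zone);
    locals.AddLocals(2, kWasmI32);   // one entry: count = 2, type byte 0x7f
    locals.AddLocals(1, kWasmF64);   // one entry: count = 1, type byte 0x7c
    std::vector<byte> buffer(locals.Size());
    locals.Emit(buffer.data());      // buffer now holds the <locals> prefix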
diff --git a/chromium/v8/src/wasm/memory-tracing.h b/chromium/v8/src/wasm/memory-tracing.h
index 15457399c17..9ea605b3563 100644
--- a/chromium/v8/src/wasm/memory-tracing.h
+++ b/chromium/v8/src/wasm/memory-tracing.h
@@ -30,8 +30,10 @@ struct MemoryTracingInfo {
// Callback for tracing a memory operation for debugging.
// Triggered by --wasm-trace-memory.
-void TraceMemoryOperation(ExecutionTier, const MemoryTracingInfo* info,
- int func_index, int position, uint8_t* mem_start);
+V8_EXPORT_PRIVATE void TraceMemoryOperation(ExecutionTier,
+ const MemoryTracingInfo* info,
+ int func_index, int position,
+ uint8_t* mem_start);
} // namespace wasm
} // namespace internal
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 9f6e91c73ea..94cc15cb11b 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -627,18 +627,26 @@ void BackgroundCompileToken::PublishCode(
NativeModule* native_module, Vector<std::unique_ptr<WasmCode>> code) {
WasmCodeRefScope code_ref_scope;
std::vector<WasmCode*> published_code = native_module->PublishCode(code);
- native_module->engine()->LogCode(VectorOf(published_code));
+ // Defer logging code in case wire bytes were not fully received yet.
+ if (native_module->HasWireBytes()) {
+ native_module->engine()->LogCode(VectorOf(published_code));
+ }
Impl(native_module->compilation_state())
->OnFinishedUnits(VectorOf(published_code));
}
void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
- if (detected.has_threads()) {
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmThreadOpcodes);
- }
- if (detected.has_simd()) {
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSimdOpcodes);
+ using Feature = v8::Isolate::UseCounterFeature;
+ constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
+ {kFeature_reftypes, Feature::kWasmRefTypes},
+ {kFeature_bulk_memory, Feature::kWasmBulkMemory},
+ {kFeature_mv, Feature::kWasmMultiValue},
+ {kFeature_simd, Feature::kWasmSimdOpcodes},
+ {kFeature_threads, Feature::kWasmThreadOpcodes}};
+
+ for (auto& feature : kUseCounters) {
+ if (detected.contains(feature.first)) isolate->CountUsage(feature.second);
}
}
@@ -802,6 +810,9 @@ class CompilationUnitBuilder {
ExecutionTierPair tiers = GetRequestedExecutionTiers(
native_module_->module(), compilation_state()->compile_mode(),
native_module_->enabled_features(), func_index);
+ // Compile everything for non-debugging initially. If needed, we will tier
+ // down when the module is fully compiled. Synchronization would be pretty
+ // difficult otherwise.
baseline_units_.emplace_back(func_index, tiers.baseline_tier, kNoDebugging);
if (tiers.baseline_tier != tiers.top_tier) {
tiering_units_.emplace_back(func_index, tiers.top_tier, kNoDebugging);
@@ -1038,15 +1049,13 @@ bool ExecuteJSToWasmWrapperCompilationUnits(
return true;
}
-bool NeedsDeterministicCompile() { return FLAG_single_threaded; }
-
// Run by the main thread and background tasks to take part in compilation.
// Returns whether any units were executed.
bool ExecuteCompilationUnits(
const std::shared_ptr<BackgroundCompileToken>& token, Counters* counters,
int task_id, CompileBaselineOnly baseline_only) {
TRACE_COMPILE("Compiling (task %d)...\n", task_id);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ExecuteCompilationUnits");
+ TRACE_EVENT0("v8.wasm", "wasm.ExecuteCompilationUnits");
// Execute JS to Wasm wrapper units first, so that they are ready to be
// finalized by the main thread when the kFinishedBaselineCompilation event is
@@ -1067,7 +1076,6 @@ bool ExecuteCompilationUnits(
// These fields are initialized in a {BackgroundCompileScope} before
// starting compilation.
double deadline = 0;
- const bool deterministic = NeedsDeterministicCompile();
base::Optional<CompilationEnv> env;
std::shared_ptr<WireBytesStorage> wire_bytes;
std::shared_ptr<const WasmModule> module;
@@ -1108,8 +1116,9 @@ bool ExecuteCompilationUnits(
auto publish_results = [&results_to_publish](
BackgroundCompileScope* compile_scope) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "PublishResults",
- "num_results", results_to_publish.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.PublishCompilationResults", "num_results",
+ results_to_publish.size());
if (results_to_publish.empty()) return;
std::vector<std::unique_ptr<WasmCode>> unpublished_code =
compile_scope->native_module()->AddCompiledCode(
@@ -1161,7 +1170,8 @@ bool ExecuteCompilationUnits(
}
// Get next unit.
- if (deterministic || deadline < platform->MonotonicallyIncreasingTime()) {
+ if (FLAG_predictable ||
+ deadline < platform->MonotonicallyIncreasingTime()) {
unit = {};
} else {
unit = compile_scope.compilation_state()->GetNextCompilationUnit(
@@ -1419,9 +1429,15 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
Handle<FixedArray>* export_wrappers_out) {
const WasmModule* wasm_module = module.get();
+ OwnedVector<uint8_t> wire_bytes_copy =
+ OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
+ // Prefer {wire_bytes_copy} to {wire_bytes.module_bytes()} for the temporary
+ // cache key. When we eventually install the module in the cache, the wire
+ // bytes of the temporary key and the new key have the same base pointer and
+ // we can skip the full bytes comparison.
std::shared_ptr<NativeModule> native_module =
isolate->wasm_engine()->MaybeGetNativeModule(
- wasm_module->origin, wire_bytes.module_bytes(), isolate);
+ wasm_module->origin, wire_bytes_copy.as_vector(), isolate);
if (native_module) {
// TODO(thibaudm): Look into sharing export wrappers.
CompileJsToWasmWrappers(isolate, wasm_module, export_wrappers_out);
@@ -1435,8 +1451,6 @@ std::shared_ptr<NativeModule> CompileToNativeModule(
if (wasm_module->has_shared_memory) {
isolate->CountUsage(v8::Isolate::UseCounterFeature::kWasmSharedMemory);
}
- OwnedVector<uint8_t> wire_bytes_copy =
- OwnedVector<uint8_t>::Of(wire_bytes.module_bytes());
// Create a new {NativeModule} first.
const bool uses_liftoff = module->origin == kWasmOrigin && FLAG_liftoff;
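
Editor's note: the comment above about preferring {wire_bytes_copy} as the temporary cache key hinges on the module cache being able to short-circuit its byte comparison when both keys share a base pointer. A sketch of that shortcut, assuming the cache compares keys roughly as follows (function name hypothetical; the real comparison lives in the native-module cache in wasm-engine.cc):

    bool SameWireBytes(Vector<const uint8_t> a, Vector<const uint8_t> b) {
      if (a.size() != b.size()) return false;
      // Identical base pointer: the temporary key and the installed key alias
      // the same buffer, so the expensive memcmp can be skipped.
      if (a.begin() == b.begin()) return true;
      return memcmp(a.begin(), b.begin(), a.size()) == 0;
    }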
@@ -1481,20 +1495,17 @@ void RecompileNativeModule(NativeModule* native_module,
}
});
- // We only wait for tier down. Tier up can happen in the background.
- if (tiering_state == kTieredDown) {
- // The main thread contributes to the compilation.
- constexpr Counters* kNoCounters = nullptr;
- while (ExecuteCompilationUnits(
- compilation_state->background_compile_token(), kNoCounters,
- kMainThreadTaskId, kBaselineOnly)) {
- // Continue executing compilation units.
- }
-
- // Now wait until baseline recompilation finished.
- recompilation_finished_semaphore->Wait();
- DCHECK(!compilation_state->failed());
+ // The main thread contributes to the compilation.
+ constexpr Counters* kNoCounters = nullptr;
+ while (ExecuteCompilationUnits(compilation_state->background_compile_token(),
+ kNoCounters, kMainThreadTaskId,
+ kBaselineOnly)) {
+ // Continue executing compilation units.
}
+
+ // Now wait until all compilation units finished.
+ recompilation_finished_semaphore->Wait();
+ DCHECK(!compilation_state->failed());
}
AsyncCompileJob::AsyncCompileJob(
@@ -1510,7 +1521,9 @@ AsyncCompileJob::AsyncCompileJob(
bytes_copy_(std::move(bytes_copy)),
wire_bytes_(bytes_copy_.get(), bytes_copy_.get() + length),
resolver_(std::move(resolver)) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "new AsyncCompileJob");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.AsyncCompileJob");
+ CHECK(FLAG_wasm_async_compilation);
CHECK(!FLAG_jitless);
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
@@ -1536,7 +1549,7 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<Counters> counters,
AccountingAllocator* allocator);
- ~AsyncStreamingProcessor();
+ ~AsyncStreamingProcessor() override;
bool ProcessModuleHeader(Vector<const uint8_t> bytes,
uint32_t offset) override;
@@ -1586,8 +1599,9 @@ class AsyncStreamingProcessor final : public StreamingProcessor {
std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
DCHECK_NULL(stream_);
- stream_.reset(new StreamingDecoder(std::make_unique<AsyncStreamingProcessor>(
- this, isolate_->async_counters(), isolate_->allocator())));
+ stream_ = StreamingDecoder::CreateAsyncStreamingDecoder(
+ std::make_unique<AsyncStreamingProcessor>(
+ this, isolate_->async_counters(), isolate_->allocator()));
return stream_;
}
@@ -1656,8 +1670,8 @@ void AsyncCompileJob::PrepareRuntimeObjects() {
// This function assumes that it is executed in a HandleScope, and that a
// context is set on the isolate.
void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "AsyncCompileJob::FinishCompile");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.FinishAsyncCompile");
bool is_after_deserialization = !module_object_.is_null();
auto compilation_state = Impl(native_module_->compilation_state());
if (!is_after_deserialization) {
@@ -1689,7 +1703,8 @@ void AsyncCompileJob::FinishCompile(bool is_after_cache_hit) {
script->set_source_mapping_url(*src_map_str.ToHandleChecked());
}
{
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "Debug::OnAfterCompile");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.Debug.OnAfterCompile");
isolate_->debug()->OnAfterCompile(script);
}
@@ -1736,8 +1751,8 @@ void AsyncCompileJob::AsyncCompileFailed() {
}
void AsyncCompileJob::AsyncCompileSucceeded(Handle<WasmModuleObject> result) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "CompilationResultResolver::OnCompilationSucceeded");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.OnCompilationSucceeded");
resolver_->OnCompilationSucceeded(result);
}
@@ -1771,8 +1786,11 @@ class AsyncCompileJob::CompilationStateCallback {
case CompilationEvent::kFailedCompilation:
DCHECK(!last_event_.has_value());
if (job_->DecrementAndCheckFinisherCount()) {
+ // Don't update {job_->native_module_} to avoid data races with other
+ // compilation threads. Use a copy of the shared pointer instead.
+ std::shared_ptr<NativeModule> native_module = job_->native_module_;
job_->isolate_->wasm_engine()->UpdateNativeModuleCache(
- true, &job_->native_module_, job_->isolate_);
+ true, &native_module, job_->isolate_);
job_->DoSync<CompileFailed>();
}
break;
@@ -1781,8 +1799,6 @@ class AsyncCompileJob::CompilationStateCallback {
// {kFinishedTopTierCompilation}, hence don't remember this in
// {last_event_}.
return;
- default:
- UNREACHABLE();
}
#ifdef DEBUG
last_event_ = event;
@@ -1933,8 +1949,8 @@ class AsyncCompileJob::DecodeModule : public AsyncCompileJob::CompileStep {
DisallowHeapAllocation no_allocation;
// Decode the module bytes.
TRACE_COMPILE("(1) Decoding module...\n");
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "AsyncCompileJob::DecodeModule");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.DecodeModule");
auto enabled_features = job->enabled_features_;
result = DecodeWasmModule(enabled_features, job->wire_bytes_.start(),
job->wire_bytes_.end(), false, kWasmOrigin,
@@ -2404,9 +2420,17 @@ void AsyncStreamingProcessor::OnFinishedStream(OwnedVector<uint8_t> bytes) {
} else {
job_->native_module_->SetWireBytes(
{std::move(job_->bytes_copy_), job_->wire_bytes_.length()});
+ job_->native_module_->LogWasmCodes(job_->isolate_);
}
const bool needs_finish = job_->DecrementAndCheckFinisherCount();
DCHECK_IMPLIES(!has_code_section, needs_finish);
+ // We might need to recompile the module for debugging, if the debugger was
+ // enabled while streaming compilation was running. Since handling this while
+ // compiling via streaming is tricky, we just tier down now, before publishing
+ // the module.
+ if (job_->native_module_->IsTieredDown()) {
+ job_->native_module_->RecompileForTiering();
+ }
if (needs_finish) {
const bool failed = job_->native_module_->compilation_state()->failed();
if (!cache_hit) {
@@ -2434,6 +2458,7 @@ void AsyncStreamingProcessor::OnAbort() {
bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
Vector<const uint8_t> wire_bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.Deserialize");
// DeserializeNativeModule and FinishCompile assume that they are executed in
// a HandleScope, and that a context is set on the isolate.
HandleScope scope(job_->isolate_);
@@ -2453,7 +2478,6 @@ bool AsyncStreamingProcessor::Deserialize(Vector<const uint8_t> module_bytes,
}
int GetMaxBackgroundTasks() {
- if (NeedsDeterministicCompile()) return 0;
int num_worker_threads = V8::GetCurrentPlatform()->NumberOfWorkerThreads();
return std::min(FLAG_wasm_num_compilation_tasks, num_worker_threads);
}
@@ -2569,36 +2593,54 @@ void CompilationStateImpl::InitializeRecompilation(
// Generate necessary compilation units on the fly.
CompilationUnitBuilder builder(native_module_);
+ // Information about compilation progress is shared between this class and the
+ // NativeModule. Before updating information here, consult the NativeModule to
+ // find all functions that need recompilation.
+ // Since the current tiering state is updated on the NativeModule before
+ // triggering recompilation, it's OK if the information is slightly outdated.
+ // If we compile functions twice, the NativeModule will ignore all redundant
+ // code (or code compiled for the wrong tier).
+ std::vector<int> recompile_function_indexes =
+ native_module_->FindFunctionsToRecompile(new_tiering_state);
+
{
base::MutexGuard guard(&callbacks_mutex_);
- // Restart recompilation if another recompilation is already happening.
- outstanding_recompilation_functions_ = 0;
- // If compilation hasn't started yet then code would be kept as tiered-down
- // and don't need to recompile.
+ callbacks_.emplace_back(std::move(recompilation_finished_callback));
+ tiering_state_ = new_tiering_state;
+
+ // If compilation progress is not initialized yet, then compilation didn't
+ // start yet, and new code will be kept tiered-down from the start. For
+ // streaming compilation, there is a special path to tier down later, when
+ // the module is complete. In any case, we don't need to recompile here.
if (compilation_progress_.size() > 0) {
const WasmModule* module = native_module_->module();
+ DCHECK_EQ(module->num_declared_functions, compilation_progress_.size());
+ DCHECK_GE(module->num_declared_functions,
+ recompile_function_indexes.size());
+ outstanding_recompilation_functions_ =
+ static_cast<int>(recompile_function_indexes.size());
+ // Restart recompilation if another recompilation is already happening.
+ for (auto& progress : compilation_progress_) {
+ progress = MissingRecompilationField::update(progress, false);
+ }
+ auto new_tier = new_tiering_state == kTieredDown
+ ? ExecutionTier::kLiftoff
+ : ExecutionTier::kTurbofan;
int imported = module->num_imported_functions;
- int declared = module->num_declared_functions;
- outstanding_recompilation_functions_ = declared;
- DCHECK_EQ(declared, compilation_progress_.size());
- for (int slot_index = 0; slot_index < declared; ++slot_index) {
- compilation_progress_[slot_index] = MissingRecompilationField::update(
- compilation_progress_[slot_index], true);
- builder.AddRecompilationUnit(imported + slot_index,
- new_tiering_state == kTieredDown
- ? ExecutionTier::kLiftoff
- : ExecutionTier::kTurbofan);
+ for (int function_index : recompile_function_indexes) {
+ DCHECK_LE(imported, function_index);
+ int slot_index = function_index - imported;
+ auto& progress = compilation_progress_[slot_index];
+ progress = MissingRecompilationField::update(progress, true);
+ builder.AddRecompilationUnit(function_index, new_tier);
}
}
- // Trigger callback if module needs no recompilation. Add to the list of
- // callbacks (to be called later) otherwise.
+ // Trigger callback if module needs no recompilation.
if (outstanding_recompilation_functions_ == 0) {
- recompilation_finished_callback(CompilationEvent::kFinishedRecompilation);
- } else {
- callbacks_.emplace_back(std::move(recompilation_finished_callback));
- tiering_state_ = new_tiering_state;
+ TriggerCallbacks(base::EnumSet<CompilationEvent>(
+ {CompilationEvent::kFinishedRecompilation}));
}
}
@@ -2661,8 +2703,9 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
// TODO(6792): Wrappers below are allocated with {Factory::NewCode}. As an
// optimization we keep the code space unlocked to avoid repeated unlocking
// because many such wrapper are allocated in sequence below.
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FinalizeJSToWasmWrappers",
- "num_wrappers", js_to_wasm_wrapper_units_.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.FinalizeJSToWasmWrappers", "num_wrappers",
+ js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
Handle<Code> code = unit->Finalize(isolate);
@@ -2680,8 +2723,8 @@ CompilationStateImpl::GetNextCompilationUnit(
}
void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "OnFinishedUnits",
- "num_units", code_vector.size());
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.OnFinishedUnits", "num_units", code_vector.size());
base::MutexGuard guard(&callbacks_mutex_);
@@ -2804,13 +2847,13 @@ void CompilationStateImpl::TriggerCallbacks(
for (auto event :
{std::make_pair(CompilationEvent::kFinishedBaselineCompilation,
- "BaselineFinished"),
+ "wasm.BaselineFinished"),
std::make_pair(CompilationEvent::kFinishedTopTierCompilation,
- "TopTierFinished"),
+ "wasm.TopTierFinished"),
std::make_pair(CompilationEvent::kFinishedRecompilation,
- "RecompilationFinished")}) {
+ "wasm.RecompilationFinished")}) {
if (!triggered_events.contains(event.first)) continue;
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), event.second);
+ TRACE_EVENT0("v8.wasm", event.second);
for (auto& callback : callbacks_) {
callback(event.first);
}
@@ -2885,15 +2928,12 @@ void CompilationStateImpl::RestartBackgroundTasks() {
}
}
- if (baseline_compilation_finished() && recompilation_finished()) {
- for (auto& task : new_tasks) {
- V8::GetCurrentPlatform()->CallLowPriorityTaskOnWorkerThread(
- std::move(task));
- }
- } else {
- for (auto& task : new_tasks) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
- }
+  // Spawn all tasks with default priority (avoid
+ // {CallLowPriorityTaskOnWorkerThread}) even for tier up, because low priority
+ // tasks will be severely delayed even if background threads are idle (see
+ // https://crbug.com/1094928).
+ for (auto& task : new_tasks) {
+ V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
}
}
diff --git a/chromium/v8/src/wasm/module-compiler.h b/chromium/v8/src/wasm/module-compiler.h
index a3fc4037a21..845e7a343b7 100644
--- a/chromium/v8/src/wasm/module-compiler.h
+++ b/chromium/v8/src/wasm/module-compiler.h
@@ -63,7 +63,8 @@ WasmCode* CompileImportWrapper(
// Triggered by the WasmCompileLazy builtin. The return value indicates whether
// compilation was successful. Lazy compilation can fail only if validation is
// also lazy.
-bool CompileLazy(Isolate*, NativeModule*, int func_index);
+// TODO(clemensb): Stop calling this from the interpreter, and don't export.
+V8_EXPORT_PRIVATE bool CompileLazy(Isolate*, NativeModule*, int func_index);
int GetMaxBackgroundTasks();
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index e7ecd1396ba..defb3dea306 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -33,7 +33,7 @@ constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
constexpr char kDebugInfoString[] = ".debug_info";
-constexpr char kExternalDebugInfoString[] = ".external_debug_info";
+constexpr char kExternalDebugInfoString[] = "external_debug_info";
const char* ExternalKindName(ImportExportKindCode kind) {
switch (kind) {
@@ -122,11 +122,13 @@ ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
return kWasmF32;
case WasmInitExpr::kF64Const:
return kWasmF64;
- case WasmInitExpr::kRefNullConst:
- return kWasmNullRef;
case WasmInitExpr::kRefFuncConst:
- return kWasmFuncRef;
- default:
+ return ValueType::Ref(kHeapFunc, kNonNullable);
+ case WasmInitExpr::kRefNullConst:
+ // It is not possible to retrieve the full {ValueType} of a {WasmInitExpr}
+      // of kind {kRefNullConst}. As WasmInitExpr of kind {kRefNullConst} is
+ // only valid in globals, the {ValueType} has to be retrieved from the
+ // global definition itself.
UNREACHABLE();
}
}
@@ -406,7 +408,6 @@ class ModuleDecoderImpl : public Decoder {
void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
uint32_t offset, bool verify_functions = true) {
- VerifyFunctionDeclarations(section_code);
if (failed()) return;
Reset(bytes, offset);
TRACE("Section: %s\n", SectionName(section_code));
@@ -447,7 +448,7 @@ class ModuleDecoderImpl : public Decoder {
// if produced by compiler. Its presence likely means that Wasm was
// built in a debug mode.
case kExternalDebugInfoSectionCode:
- // .external_debug_info is a custom section containing a reference to an
+ // external_debug_info is a custom section containing a reference to an
// external symbol file.
case kCompilationHintsSectionCode:
// TODO(frgossen): report out of place compilation hints section as a
@@ -559,7 +560,8 @@ class ModuleDecoderImpl : public Decoder {
uint8_t kind = consume_u8("type kind");
switch (kind) {
case kWasmFunctionTypeCode: {
- const FunctionSig* s = consume_sig(module_->signature_zone.get());
+ const FunctionSig* s = consume_sig(module_->signature_zone.get(),
+ DeferIndexCheckMode::kDeferCheck);
module_->add_signature(s);
break;
}
@@ -589,6 +591,27 @@ class ModuleDecoderImpl : public Decoder {
}
}
module_->signature_map.Freeze();
+ VerifyDeferredTypeOffsets();
+ }
+
+ // TODO(7748): When typed function references are allowed, this should be
+ // deleted altogether and replaced by an inline in-bounds check.
+ void VerifyDeferredTypeOffsets() {
+ for (auto& type_offset : deferred_check_type_index_) {
+ uint32_t type_index = type_offset.first;
+ uint32_t code_offset = type_offset.second;
+ if (type_index >= module_->type_kinds.size()) {
+ errorf(code_offset, "reference to undeclared struct/array #%u",
+ type_index);
+ break;
+ }
+ uint8_t type = module_->type_kinds[type_index];
+ if (type == kWasmFunctionTypeCode) {
+ errorf(code_offset, "cannot build reference to function type index #%u",
+ type_index);
+ break;
+ }
+ }
}
void DecodeImportSection() {
@@ -637,12 +660,6 @@ class ModuleDecoderImpl : public Decoder {
WasmTable* table = &module_->tables.back();
table->imported = true;
ValueType type = consume_reference_type();
- if (!enabled_features_.has_anyref()) {
- if (type != kWasmFuncRef) {
- error(pc_ - 1, "invalid table type");
- break;
- }
- }
table->type = type;
uint8_t flags = validate_table_flags("element count");
consume_resizable_limits(
@@ -723,9 +740,9 @@ class ModuleDecoderImpl : public Decoder {
void DecodeTableSection() {
// TODO(ahaas): Set the correct limit to {kV8MaxWasmTables} once the
- // implementation of AnyRef landed.
+ // implementation of ExternRef landed.
uint32_t max_count =
- enabled_features_.has_anyref() ? 100000 : kV8MaxWasmTables;
+ enabled_features_.has_reftypes() ? 100000 : kV8MaxWasmTables;
uint32_t table_count = consume_count("table count", max_count);
for (uint32_t i = 0; ok() && i < table_count; i++) {
@@ -793,8 +810,14 @@ class ModuleDecoderImpl : public Decoder {
WasmFunction* func = nullptr;
exp->index =
consume_func_index(module_.get(), &func, "export function index");
+
+ if (failed()) break;
+ DCHECK_NOT_NULL(func);
+
module_->num_exported_functions++;
- if (func) func->exported = true;
+ func->exported = true;
+ // Exported functions are considered "declared".
+ func->declared = true;
break;
}
case kExternalTable: {
@@ -899,10 +922,11 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos, "out of bounds table index %u", table_index);
break;
}
- if (!type.IsSubTypeOf(module_->tables[table_index].type)) {
+ if (!IsSubtypeOf(type, module_->tables[table_index].type,
+ this->module_.get())) {
errorf(pos,
"Invalid element segment. Table %u is not a super-type of %s",
- table_index, type.type_name());
+ table_index, type.type_name().c_str());
break;
}
}
@@ -1203,41 +1227,7 @@ class ModuleDecoderImpl : public Decoder {
return true;
}
- void VerifyFunctionDeclarations(SectionCode section_code) {
- // Since we will only know if a function was properly declared after all the
- // element sections have been parsed, but we need to verify the proper use
- // within global initialization, we are deferring those checks.
- if (deferred_funcref_error_offsets_.empty()) {
- // No verifications to do be done.
- return;
- }
- if (!ok()) {
- // Previous errors exist.
- return;
- }
- // TODO(ecmziegler): Adjust logic if module order changes (e.g. event
- // section).
- if (section_code <= kElementSectionCode &&
- section_code != kUnknownSectionCode) {
- // Before the element section and not at end of decoding.
- return;
- }
- for (auto& func_offset : deferred_funcref_error_offsets_) {
- DCHECK_LT(func_offset.first, module_->functions.size());
- if (!module_->functions[func_offset.first].declared) {
- errorf(func_offset.second, "undeclared reference to function #%u",
- func_offset.first);
- break;
- }
- }
- deferred_funcref_error_offsets_.clear();
- }
-
ModuleResult FinishDecoding(bool verify_functions = true) {
- // Ensure that function verifications were done even if no section followed
- // the global section.
- VerifyFunctionDeclarations(kUnknownSectionCode);
-
if (ok() && CheckMismatchedCounts()) {
CalculateGlobalOffsets(module_.get());
}
@@ -1298,7 +1288,7 @@ class ModuleDecoderImpl : public Decoder {
pc_ = start_;
expect_u8("type form", kWasmFunctionTypeCode);
if (!ok()) return FunctionResult{std::move(intermediate_error_)};
- function->sig = consume_sig(zone);
+ function->sig = consume_sig(zone, DeferIndexCheckMode::kNoCheck);
function->code = {off(pc_), static_cast<uint32_t>(end_ - pc_)};
if (ok())
@@ -1316,7 +1306,8 @@ class ModuleDecoderImpl : public Decoder {
const FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
pc_ = start;
if (!expect_u8("type form", kWasmFunctionTypeCode)) return nullptr;
- const FunctionSig* result = consume_sig(zone);
+ const FunctionSig* result =
+ consume_sig(zone, DeferIndexCheckMode::kNoCheck);
return ok() ? result : nullptr;
}
@@ -1357,10 +1348,10 @@ class ModuleDecoderImpl : public Decoder {
kLastKnownModuleSection,
"not enough bits");
WasmError intermediate_error_;
- // Map from function index to wire byte offset of first funcref initialization
- // in global section. Used for deferred checking and proper error reporting if
- // these were not properly declared in the element section.
- std::unordered_map<uint32_t, int> deferred_funcref_error_offsets_;
+ // Set of type offsets discovered in field types during type section decoding.
+ // Since struct types may be recursive, this is used for checking and error
+ // reporting once the whole type section is parsed.
+ std::unordered_map<uint32_t, int> deferred_check_type_index_;
ModuleOrigin origin_;
bool has_seen_unordered_section(SectionCode section_code) {
@@ -1376,7 +1367,7 @@ class ModuleDecoderImpl : public Decoder {
}
bool AddTable(WasmModule* module) {
- if (enabled_features_.has_anyref()) return true;
+ if (enabled_features_.has_reftypes()) return true;
if (module->tables.size() > 0) {
error("At most one table is supported");
return false;
@@ -1401,7 +1392,7 @@ class ModuleDecoderImpl : public Decoder {
global->type = consume_value_type();
global->mutability = consume_mutability();
const byte* pos = pc();
- global->init = consume_init_expr(module, kWasmStmt);
+ global->init = consume_init_expr(module, global->type);
if (global->init.kind == WasmInitExpr::kGlobalIndex) {
uint32_t other_index = global->init.val.global_index;
if (other_index >= index) {
@@ -1413,14 +1404,8 @@ class ModuleDecoderImpl : public Decoder {
errorf(pos,
"type mismatch in global initialization "
"(from global #%u), expected %s, got %s",
- other_index, global->type.type_name(),
- module->globals[other_index].type.type_name());
- }
- } else {
- if (!TypeOf(module, global->init).IsSubTypeOf(global->type)) {
- errorf(pos, "type error in global initialization, expected %s, got %s",
- global->type.type_name(),
- TypeOf(module, global->init).type_name());
+ other_index, global->type.type_name().c_str(),
+ module->globals[other_index].type.type_name().c_str());
}
}
}
@@ -1433,7 +1418,7 @@ class ModuleDecoderImpl : public Decoder {
for (WasmGlobal& global : module->globals) {
if (global.mutability && global.imported) {
global.index = num_imported_mutable_globals++;
- } else if (global.type.IsReferenceType()) {
+ } else if (global.type.is_reference_type()) {
global.offset = tagged_offset;
// All entries in the tagged_globals_buffer have size 1.
tagged_offset++;
@@ -1675,24 +1660,36 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kExprRefNull: {
- if (enabled_features_.has_anyref() || enabled_features_.has_eh()) {
+ if (enabled_features_.has_reftypes() || enabled_features_.has_eh()) {
+ RefNullImmediate<Decoder::kValidate> imm(WasmFeatures::All(), this,
+ pc() - 1);
+ if (!imm.type.is_reference_type()) {
+ errorf(pc() - 1, "ref.null is not supported for %s",
+ imm.type.type_name().c_str());
+ break;
+ }
expr.kind = WasmInitExpr::kRefNullConst;
- len = 0;
+ len = imm.length;
+ if (expected != kWasmStmt &&
+ !IsSubtypeOf(imm.type, expected, module_.get())) {
+ errorf(pos, "type error in init expression, expected %s, got %s",
+ expected.type_name().c_str(), imm.type.type_name().c_str());
+ }
break;
}
V8_FALLTHROUGH;
}
case kExprRefFunc: {
- if (enabled_features_.has_anyref()) {
+ if (enabled_features_.has_reftypes()) {
FunctionIndexImmediate<Decoder::kValidate> imm(this, pc() - 1);
if (module->functions.size() <= imm.index) {
errorf(pc() - 1, "invalid function index: %u", imm.index);
break;
}
- // Defer check for declaration of function reference.
- deferred_funcref_error_offsets_.emplace(imm.index, pc_offset());
expr.kind = WasmInitExpr::kRefFuncConst;
expr.val.function_index = imm.index;
+ // Functions referenced in the globals section count as "declared".
+ module->functions[imm.index].declared = true;
len = imm.length;
break;
}
@@ -1708,9 +1705,13 @@ class ModuleDecoderImpl : public Decoder {
if (!expect_u8("end opcode", kExprEnd)) {
expr.kind = WasmInitExpr::kNone;
}
- if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
+
+ // The type check of ref.null is special, and already done above.
+ if (expected != kWasmStmt && opcode != kExprRefNull &&
+ !IsSubtypeOf(TypeOf(module, expr), expected, module_.get())) {
errorf(pos, "type error in init expression, expected %s, got %s",
- expected.type_name(), TypeOf(module, expr).type_name());
+ expected.type_name().c_str(),
+ TypeOf(module, expr).type_name().c_str());
}
return expr;
}
@@ -1723,49 +1724,60 @@ class ModuleDecoderImpl : public Decoder {
}
ValueType consume_value_type() {
- ValueType result;
- uint32_t type_length = value_type_reader::read_value_type<kValidate>(
- this, this->pc(), &result,
+ uint32_t type_length;
+ ValueType result = value_type_reader::read_value_type<kValidate>(
+ this, this->pc(), &type_length,
origin_ == kWasmOrigin ? enabled_features_ : WasmFeatures::None());
- if (type_length == 0) error(pc_, "invalid value type");
- consume_bytes(type_length);
+ if (result == kWasmBottom) error(pc_, "invalid value type");
+ consume_bytes(type_length, "value type");
return result;
}
- // Reads a single 8-bit integer, interpreting it as a reference type.
- ValueType consume_reference_type() {
- byte val = consume_u8("reference type");
- ValueTypeCode t = static_cast<ValueTypeCode>(val);
- switch (t) {
- case kLocalFuncRef:
- return kWasmFuncRef;
- case kLocalAnyRef:
- if (!enabled_features_.has_anyref()) {
- error(pc_ - 1,
- "Invalid type. Set --experimental-wasm-anyref to use 'AnyRef'");
- }
- return kWasmAnyRef;
- case kLocalNullRef:
- if (!enabled_features_.has_anyref()) {
- error(
- pc_ - 1,
- "Invalid type. Set --experimental-wasm-anyref to use 'NullRef'");
- }
- return kWasmNullRef;
- case kLocalExnRef:
- if (!enabled_features_.has_eh()) {
- error(pc_ - 1,
- "Invalid type. Set --experimental-wasm-eh to use 'ExnRef'");
- }
- return kWasmExnRef;
+ ValueType consume_storage_type() {
+ uint8_t opcode = read_u8<kValidate>(this->pc());
+ switch (opcode) {
+ case kLocalI8:
+ consume_bytes(1, "i8");
+ return kWasmI8;
+ case kLocalI16:
+ consume_bytes(1, "i16");
+ return kWasmI16;
default:
- break;
+ // It is not a packed type, so it has to be a value type.
+ return consume_value_type();
+ }
+ }
+
+ // Reads a reference type for tables and element segment headers.
+ // Note that, unless extensions are enabled, only funcref is allowed.
+ ValueType consume_reference_type() {
+ if (!enabled_features_.has_reftypes()) {
+ uint8_t ref_type = consume_u8("reference type");
+ if (ref_type != kLocalFuncRef) {
+ error(pc_ - 1,
+ "invalid table type. Consider using experimental flags.");
+ return kWasmBottom;
+ }
+ return kWasmFuncRef;
+ } else {
+ const byte* position = pc();
+ ValueType result = consume_value_type();
+ if (!result.is_reference_type()) {
+ error(position, "expected reference type");
+ }
+ return result;
+ }
+ }
+
+ enum DeferIndexCheckMode { kNoCheck, kDeferCheck };
+
+ void defer_index_check(ValueType type) {
+ if (type.has_index()) {
+ deferred_check_type_index_.emplace(type.ref_index(), pc_offset());
}
- error(pc_ - 1, "invalid reference type");
- return kWasmStmt;
}
- const FunctionSig* consume_sig(Zone* zone) {
+ const FunctionSig* consume_sig(Zone* zone, DeferIndexCheckMode defer_check) {
// Parse parameter types.
uint32_t param_count =
consume_count("param count", kV8MaxWasmFunctionParams);
@@ -1773,6 +1785,9 @@ class ModuleDecoderImpl : public Decoder {
std::vector<ValueType> params;
for (uint32_t i = 0; ok() && i < param_count; ++i) {
ValueType param = consume_value_type();
+ if (defer_check == DeferIndexCheckMode::kDeferCheck) {
+ defer_index_check(param);
+ }
params.push_back(param);
}
std::vector<ValueType> returns;
@@ -1784,6 +1799,9 @@ class ModuleDecoderImpl : public Decoder {
if (failed()) return nullptr;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
ValueType ret = consume_value_type();
+ if (defer_check == DeferIndexCheckMode::kDeferCheck) {
+ defer_index_check(ret);
+ }
returns.push_back(ret);
}
@@ -1802,22 +1820,29 @@ class ModuleDecoderImpl : public Decoder {
// TODO(7748): Introduce a proper maximum.
uint32_t field_count = consume_count("field count", 999);
if (failed()) return nullptr;
- std::vector<ValueType> fields;
+ ValueType* fields = zone->NewArray<ValueType>(field_count);
+ bool* mutabilities = zone->NewArray<bool>(field_count);
for (uint32_t i = 0; ok() && i < field_count; ++i) {
- ValueType field = consume_value_type();
- fields.push_back(field);
+ ValueType field = consume_storage_type();
+ defer_index_check(field);
+ fields[i] = field;
+ bool mutability = consume_mutability();
+ mutabilities[i] = mutability;
}
if (failed()) return nullptr;
- ValueType* buffer = zone->NewArray<ValueType>(field_count);
- for (uint32_t i = 0; i < field_count; i++) buffer[i] = fields[i];
uint32_t* offsets = zone->NewArray<uint32_t>(field_count);
- return new (zone) StructType(field_count, offsets, buffer);
+ return new (zone) StructType(field_count, offsets, fields, mutabilities);
}
const ArrayType* consume_array(Zone* zone) {
- ValueType field = consume_value_type();
+ ValueType field = consume_storage_type();
if (failed()) return nullptr;
- return new (zone) ArrayType(field);
+ defer_index_check(field);
+ bool mutability = consume_mutability();
+ if (!mutability) {
+ error(this->pc() - 1, "immutable arrays are not supported yet");
+ }
+ return new (zone) ArrayType(field, mutability);
}
// Consume the attribute field of an exception.
@@ -1837,15 +1862,16 @@ class ModuleDecoderImpl : public Decoder {
WasmInitExpr* offset) {
const byte* pos = pc();
uint8_t flag;
- if (enabled_features_.has_bulk_memory() || enabled_features_.has_anyref()) {
+ if (enabled_features_.has_bulk_memory() ||
+ enabled_features_.has_reftypes()) {
flag = consume_u8("flag");
} else {
uint32_t table_index = consume_u32v("table index");
- // The only valid flag value without bulk_memory or anyref is '0'.
+ // The only valid flag value without bulk_memory or externref is '0'.
if (table_index != 0) {
error(
"Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
return;
}
flag = 0;
@@ -1880,8 +1906,9 @@ class ModuleDecoderImpl : public Decoder {
*status == WasmElemSegment::kStatusActive;
if (*status == WasmElemSegment::kStatusDeclarative &&
- !enabled_features_.has_anyref()) {
- error("Declarative element segments require --experimental-wasm-anyref");
+ !enabled_features_.has_reftypes()) {
+ error(
+ "Declarative element segments require --experimental-wasm-reftypes");
return;
}
if (*status == WasmElemSegment::kStatusPassive &&
@@ -1896,10 +1923,10 @@ class ModuleDecoderImpl : public Decoder {
return;
}
if (flag != 0 && !enabled_features_.has_bulk_memory() &&
- !enabled_features_.has_anyref()) {
+ !enabled_features_.has_reftypes()) {
error(
"Invalid segment flag. Did you forget "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref?");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes?");
return;
}
if ((flag & kFullMask) != flag) {
@@ -1953,10 +1980,10 @@ class ModuleDecoderImpl : public Decoder {
}
} else if (flag == SegmentFlags::kActiveWithIndex) {
if (!(enabled_features_.has_bulk_memory() ||
- enabled_features_.has_anyref())) {
+ enabled_features_.has_reftypes())) {
error(
"Element segments with table indices require "
- "--experimental-wasm-bulk-memory or --experimental-wasm-anyref");
+ "--experimental-wasm-bulk-memory or --experimental-wasm-reftypes");
return;
}
} else if (flag != SegmentFlags::kActiveNoIndex) {
@@ -1999,9 +2026,13 @@ class ModuleDecoderImpl : public Decoder {
uint8_t opcode = consume_u8("element opcode");
if (failed()) return index;
switch (opcode) {
- case kExprRefNull:
+ case kExprRefNull: {
+ RefNullImmediate<kValidate> imm(WasmFeatures::All(), this,
+ this->pc() - 1);
+ consume_bytes(imm.length, "ref.null immediate");
index = WasmElemSegment::kNullIndex;
break;
+ }
case kExprRefFunc:
index = consume_element_func_index();
if (failed()) return index;
@@ -2134,7 +2165,7 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets) {
Decoder decoder(encoded_offsets);
uint32_t functions_count = decoder.consume_u32v("functions count");
- // Sanity check.
+ // Consistency check.
DCHECK_GE(encoded_offsets.size(), functions_count);
functions.reserve(functions_count);
@@ -2297,7 +2328,8 @@ void GenerateNamesFromImportsAndExports(
names) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
+ kind == kExternalTable);
// Extract from import table.
for (const WasmImport& imp : import_table) {
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
index 9dfc1e16081..b48c9635880 100644
--- a/chromium/v8/src/wasm/module-instantiate.cc
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -16,6 +16,7 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-subtyping.h"
#define TRACE(...) \
do { \
@@ -196,7 +197,7 @@ class InstanceBuilder {
void WriteGlobalValue(const WasmGlobal& global,
Handle<WasmGlobalObject> value);
- void WriteGlobalAnyRef(const WasmGlobal& global, Handle<Object> value);
+ void WriteGlobalExternRef(const WasmGlobal& global, Handle<Object> value);
void SanitizeImports();
@@ -304,7 +305,8 @@ InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "InstanceBuilder::Build");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.InstanceBuilder.Build");
// Check that an imports argument was provided, if the module requires it.
// No point in continuing otherwise.
if (!module_->import_table.empty() && ffi_.is_null()) {
@@ -472,7 +474,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// iteration below.
for (int i = 1; i < table_count; ++i) {
const WasmTable& table = module_->tables[i];
- if (table.type == kWasmFuncRef) {
+ if (table.type.heap_type() == kHeapFunc) {
Handle<WasmIndirectFunctionTable> table_obj =
WasmIndirectFunctionTable::New(isolate_, table.initial_size);
tables->set(i, *table_obj);
@@ -524,8 +526,10 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
auto table_object = handle(WasmTableObject::cast(instance->tables().get(
elem_segment.table_index)),
isolate_);
- size_t table_size = table_object->current_length();
- if (!base::IsInBounds(base, elem_segment.entries.size(), table_size)) {
+ uint32_t table_size = table_object->current_length();
+ if (!base::IsInBounds<uint32_t>(
+ base, static_cast<uint32_t>(elem_segment.entries.size()),
+ table_size)) {
thrower_->LinkError("table initializer is out of bounds");
return {};
}
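
Editor's note: the switch to explicit template arguments ({IsInBounds<uint32_t>}, {IsInBounds<uint64_t>}) above pins the width of the overflow-safe bounds check, so mixed-width arguments (e.g. a 32-bit base against a 64-bit memory size) no longer select a narrowing instantiation. Roughly what that helper checks (a sketch; the actual template lives in src/base/bounds.h):

    template <typename T>
    constexpr bool IsInBounds(T index, T length, T max) {
      static_assert(std::is_unsigned<T>::value, "requires unsigned T");
      // Written so that index + length cannot overflow T.
      return length <= max && index <= (max - length);
    }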
@@ -537,8 +541,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
for (const WasmDataSegment& seg : module_->data_segments) {
if (!seg.active) continue;
uint32_t base = EvalUint32InitExpr(instance, seg.dest_addr);
- if (!base::IsInBounds(base, seg.source.length(),
- instance->memory_size())) {
+ if (!base::IsInBounds<uint64_t>(base, seg.source.length(),
+ instance->memory_size())) {
thrower_->LinkError("data segment is out of bounds");
return {};
}
@@ -616,8 +620,8 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
}
bool InstanceBuilder::ExecuteStartFunction() {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
- "InstanceBuilder::ExecuteStartFunction");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
+ "wasm.ExecuteStartFunction");
if (start_function_.is_null()) return true; // No start function.
HandleScope scope(isolate_);
@@ -730,7 +734,8 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
if (size == 0) continue;
uint32_t dest_offset = EvalUint32InitExpr(instance, segment.dest_addr);
- DCHECK(base::IsInBounds(dest_offset, size, instance->memory_size()));
+ DCHECK(base::IsInBounds<uint64_t>(dest_offset, size,
+ instance->memory_size()));
byte* dest = instance->memory_start() + dest_offset;
const byte* src = wire_bytes.begin() + segment.source.offset();
memcpy(dest, src, size);
@@ -741,7 +746,7 @@ void InstanceBuilder::LoadDataSegments(Handle<WasmInstanceObject> instance) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
TRACE("init [globals_start=%p + %u] = %lf, type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.type_name());
+ global.type.type_name().c_str());
switch (global.type.kind()) {
case ValueType::kI32:
WriteLittleEndianValue<int32_t>(GetRawGlobalPtr<int32_t>(global),
@@ -767,7 +772,7 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, double num) {
void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global, int64_t num) {
TRACE("init [globals_start=%p + %u] = %" PRId64 ", type = %s\n",
raw_buffer_ptr(untagged_globals_, 0), global.offset, num,
- global.type.type_name());
+ global.type.type_name().c_str());
DCHECK_EQ(kWasmI64, global.type);
WriteLittleEndianValue<int64_t>(GetRawGlobalPtr<int64_t>(global), num);
}
@@ -801,27 +806,25 @@ void InstanceBuilder::WriteGlobalValue(const WasmGlobal& global,
TRACE("%lf", num);
break;
}
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
+ case ValueType::kRtt:
case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- DCHECK_IMPLIES(global.type == kWasmNullRef, value->GetRef()->IsNull());
+ case ValueType::kOptRef: {
tagged_globals_->set(global.offset, *value->GetRef());
break;
}
case ValueType::kStmt:
case ValueType::kS128:
case ValueType::kBottom:
+ case ValueType::kI8:
+ case ValueType::kI16:
UNREACHABLE();
}
- TRACE(", type = %s (from WebAssembly.Global)\n", global.type.type_name());
+ TRACE(", type = %s (from WebAssembly.Global)\n",
+ global.type.type_name().c_str());
}
-void InstanceBuilder::WriteGlobalAnyRef(const WasmGlobal& global,
- Handle<Object> value) {
+void InstanceBuilder::WriteGlobalExternRef(const WasmGlobal& global,
+ Handle<Object> value) {
tagged_globals_->set(global.offset, *value, UPDATE_WRITE_BARRIER);
}
@@ -1046,7 +1049,7 @@ bool InstanceBuilder::ProcessImportedTable(Handle<WasmInstanceObject> instance,
return false;
}
- if (table.type == kWasmFuncRef &&
+ if (table.type.heap_type() == kHeapFunc &&
!InitializeImportedIndirectFunctionTable(instance, table_index,
import_index, table_object)) {
return false;
@@ -1113,13 +1116,14 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
Handle<WasmInstanceObject> instance, int import_index,
Handle<String> module_name, Handle<String> import_name,
const WasmGlobal& global, Handle<WasmGlobalObject> global_object) {
- if (global_object->is_mutable() != global.mutability) {
+ if (static_cast<bool>(global_object->is_mutable()) != global.mutability) {
ReportLinkError("imported global does not match the expected mutability",
import_index, module_name, import_name);
return false;
}
- bool is_sub_type = global_object->type().IsSubTypeOf(global.type);
+ bool is_sub_type =
+ IsSubtypeOf(global_object->type(), global.type, instance->module());
bool is_same_type = global_object->type() == global.type;
bool valid_type = global.mutability ? is_same_type : is_sub_type;
@@ -1132,12 +1136,13 @@ bool InstanceBuilder::ProcessImportedWasmGlobalObject(
DCHECK_LT(global.index, module_->num_imported_mutable_globals);
Handle<Object> buffer;
Address address_or_offset;
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
static_assert(sizeof(global_object->offset()) <= sizeof(Address),
"The offset into the globals buffer does not fit into "
"the imported_mutable_globals array");
buffer = handle(global_object->tagged_buffer(), isolate_);
- // For anyref globals we use a relative offset, not an absolute address.
+ // For externref globals we use a relative offset, not an absolute
+ // address.
address_or_offset = static_cast<Address>(global_object->offset());
} else {
buffer = handle(global_object->untagged_buffer(), isolate_);
@@ -1210,8 +1215,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
return false;
}
- if (global.type.IsReferenceType()) {
- if (global.type == kWasmFuncRef) {
+ if (global.type.is_reference_type()) {
+ if (global.type.heap_type() == kHeapFunc) {
if (!value->IsNull(isolate_) &&
!WasmExportedFunction::IsWasmExportedFunction(*value)) {
ReportLinkError(
@@ -1219,14 +1224,8 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
import_index, module_name, import_name);
return false;
}
- } else if (global.type == kWasmNullRef) {
- if (!value->IsNull(isolate_)) {
- ReportLinkError("imported nullref global must be null", import_index,
- module_name, import_name);
- return false;
- }
}
- WriteGlobalAnyRef(global, value);
+ WriteGlobalExternRef(global, value);
return true;
}
@@ -1412,7 +1411,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
global.init.val.f64_const);
break;
case WasmInitExpr::kRefNullConst:
- DCHECK(enabled_.has_anyref() || enabled_.has_eh());
+ DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
if (global.imported) break; // We already initialized imported globals.
tagged_globals_->set(global.offset,
@@ -1420,7 +1419,7 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
SKIP_WRITE_BARRIER);
break;
case WasmInitExpr::kRefFuncConst: {
- DCHECK(enabled_.has_anyref());
+ DCHECK(enabled_.has_reftypes());
auto function = WasmInstanceObject::GetOrCreateWasmExternalFunction(
isolate_, instance, global.init.val.function_index);
tagged_globals_->set(global.offset, *function);
@@ -1432,8 +1431,8 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
uint32_t old_offset =
module_->globals[global.init.val.global_index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset, old_offset);
- if (global.type.IsReferenceType()) {
- DCHECK(enabled_.has_anyref() || enabled_.has_eh());
+ if (global.type.is_reference_type()) {
+ DCHECK(enabled_.has_reftypes() || enabled_.has_eh());
tagged_globals_->set(new_offset, tagged_globals_->get(old_offset));
} else {
size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
@@ -1483,7 +1482,7 @@ bool InstanceBuilder::AllocateMemory() {
bool InstanceBuilder::NeedsWrappers() const {
if (module_->num_exported_functions > 0) return true;
for (auto& table : module_->tables) {
- if (table.type == kWasmFuncRef) return true;
+ if (table.type.heap_type() == kHeapFunc) return true;
}
return false;
}
@@ -1571,10 +1570,10 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
if (global.mutability && global.imported) {
Handle<FixedArray> buffers_array(
instance->imported_mutable_globals_buffers(), isolate_);
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
tagged_buffer = handle(
FixedArray::cast(buffers_array->get(global.index)), isolate_);
- // For anyref globals we store the relative offset in the
+ // For externref globals we store the relative offset in the
// imported_mutable_globals array instead of an absolute address.
Address addr = instance->imported_mutable_globals()[global.index];
DCHECK_LE(addr, static_cast<Address>(
@@ -1595,7 +1594,7 @@ void InstanceBuilder::ProcessExports(Handle<WasmInstanceObject> instance) {
offset = static_cast<uint32_t>(global_addr - backing_store);
}
} else {
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
tagged_buffer = handle(instance->tagged_globals_buffer(), isolate_);
} else {
untagged_buffer =
@@ -1656,7 +1655,7 @@ void InstanceBuilder::InitializeIndirectFunctionTables(
for (int i = 0; i < static_cast<int>(module_->tables.size()); ++i) {
const WasmTable& table = module_->tables[i];
- if (table.type == kWasmFuncRef) {
+ if (table.type.heap_type() == kHeapFunc) {
WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
instance, i, table.initial_size);
}
@@ -1672,11 +1671,12 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
// TODO(wasm): Move this functionality into wasm-objects, since it is used
// for both instantiation and in the implementation of the table.init
// instruction.
- if (!base::IsInBounds(dst, count, table_object->current_length()) ||
- !base::IsInBounds(src, count,
- instance->dropped_elem_segments()[segment_index] == 0
- ? elem_segment.entries.size()
- : 0)) {
+ if (!base::IsInBounds<uint64_t>(dst, count, table_object->current_length()) ||
+ !base::IsInBounds<uint64_t>(
+ src, count,
+ instance->dropped_elem_segments()[segment_index] == 0
+ ? elem_segment.entries.size()
+ : 0)) {
return false;
}
@@ -1686,7 +1686,7 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
int entry_index = static_cast<int>(dst + i);
if (func_index == WasmElemSegment::kNullIndex) {
- if (table_object->type() == kWasmFuncRef) {
+ if (table_object->type().heap_type() == kHeapFunc) {
IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
WasmTableObject::Set(isolate, table_object, entry_index,
@@ -1697,15 +1697,15 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmFunction* function = &module->functions[func_index];
// Update the local dispatch table first if necessary.
- if (table_object->type() == kWasmFuncRef) {
+ if (table_object->type().heap_type() == kHeapFunc) {
uint32_t sig_id = module->signature_ids[function->sig_index];
IndirectFunctionTableEntry(instance, table_index, entry_index)
.Set(sig_id, instance, func_index);
}
- // For AnyRef tables, we have to generate the WasmExternalFunction eagerly.
- // Later we cannot know if an entry is a placeholder or not.
- if (table_object->type() == kWasmAnyRef) {
+ // For ExternRef tables, we have to generate the WasmExternalFunction
+ // eagerly. Later we cannot know if an entry is a placeholder or not.
+ if (table_object->type().heap_type() == kHeapExtern) {
Handle<WasmExternalFunction> wasm_external_function =
WasmInstanceObject::GetOrCreateWasmExternalFunction(isolate, instance,
func_index);
@@ -1772,7 +1772,7 @@ void InstanceBuilder::LoadTableSegments(Handle<WasmInstanceObject> instance) {
int table_count = static_cast<int>(module_->tables.size());
for (int index = 0; index < table_count; ++index) {
- if (module_->tables[index].type == kWasmFuncRef) {
+ if (module_->tables[index].type.heap_type() == kHeapFunc) {
auto table_object = handle(
WasmTableObject::cast(instance->tables().get(index)), isolate_);
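
The bounds checks in LoadElemSegmentImpl above are instantiated as base::IsInBounds<uint64_t> so that dst + count cannot wrap around 32 bits before the comparison. A minimal sketch of that idea, using a hypothetical IsInBounds64 helper rather than V8's base::IsInBounds template:

#include <cassert>
#include <cstdint>

// Widening to 64 bits means index + length cannot overflow for 32-bit inputs,
// so a single comparison against the limit is sufficient.
bool IsInBounds64(uint64_t index, uint64_t length, uint64_t max) {
  return index + length <= max;
}

int main() {
  // With 32-bit arithmetic, 0xFFFFFFFF + 2 would wrap to 1 and pass a naive
  // check; the widened version correctly rejects it.
  assert(!IsInBounds64(0xFFFFFFFFu, 2, 100));
  assert(IsInBounds64(10, 5, 16));
}
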
diff --git a/chromium/v8/src/wasm/streaming-decoder.cc b/chromium/v8/src/wasm/streaming-decoder.cc
index c88a2c77b89..eb297807007 100644
--- a/chromium/v8/src/wasm/streaming-decoder.cc
+++ b/chromium/v8/src/wasm/streaming-decoder.cc
@@ -25,7 +25,203 @@ namespace v8 {
namespace internal {
namespace wasm {
-void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
+class V8_EXPORT_PRIVATE AsyncStreamingDecoder : public StreamingDecoder {
+ public:
+ explicit AsyncStreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+
+ // The buffer passed into OnBytesReceived is owned by the caller.
+ void OnBytesReceived(Vector<const uint8_t> bytes) override;
+
+ void Finish() override;
+
+ void Abort() override;
+
+ // Notify the StreamingDecoder that compilation ended and the
+ // StreamingProcessor should not be called anymore.
+ void NotifyCompilationEnded() override { Fail(); }
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) override;
+
+ private:
+ // The SectionBuffer is the data object for the content of a single section.
+ // It stores all bytes of the section (including section id and section
+ // length), and the offset where the actual payload starts.
+ class SectionBuffer : public WireBytesStorage {
+ public:
+ // id: The section id.
+ // payload_length: The length of the payload.
+ // length_bytes: The section length, as it is encoded in the module bytes.
+ SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
+ Vector<const uint8_t> length_bytes)
+ : // ID + length + payload
+ module_offset_(module_offset),
+ bytes_(OwnedVector<uint8_t>::NewForOverwrite(
+ 1 + length_bytes.length() + payload_length)),
+ payload_offset_(1 + length_bytes.length()) {
+ bytes_.start()[0] = id;
+ memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
+ }
+
+ SectionCode section_code() const {
+ return static_cast<SectionCode>(bytes_.start()[0]);
+ }
+
+ Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
+ DCHECK_LE(module_offset_, ref.offset());
+ uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
+ return bytes().SubVector(offset_in_code_buffer,
+ offset_in_code_buffer + ref.length());
+ }
+
+ uint32_t module_offset() const { return module_offset_; }
+ Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
+ Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
+ size_t length() const { return bytes_.size(); }
+ size_t payload_offset() const { return payload_offset_; }
+
+ private:
+ const uint32_t module_offset_;
+ const OwnedVector<uint8_t> bytes_;
+ const size_t payload_offset_;
+ };
+
+ // The decoding of a stream of wasm module bytes is organized in states. Each
+ // state provides a buffer to store the bytes required for the current state,
+ // information on how many bytes have already been received, how many bytes
+ // are needed, and a {Next} function which starts the next state once all
+ // bytes of the current state were received.
+ //
+ // The states change according to the following state diagram:
+ //
+ // Start
+ // |
+ // |
+ // v
+ // DecodeModuleHeader
+ // | _________________________________________
+ // | | |
+ // v v |
+ // DecodeSectionID --> DecodeSectionLength --> DecodeSectionPayload
+ // A |
+ // | | (if the section id == code)
+ // | v
+ // | DecodeNumberOfFunctions -- > DecodeFunctionLength
+ // | A |
+ // | | |
+ // | (after all functions were read) | v
+ // ------------------------------------- DecodeFunctionBody
+ //
+ class DecodingState {
+ public:
+ virtual ~DecodingState() = default;
+
+ // Reads the bytes for the current state and returns the number of read
+ // bytes.
+ virtual size_t ReadBytes(AsyncStreamingDecoder* streaming,
+ Vector<const uint8_t> bytes);
+
+ // Returns the next state of the streaming decoding.
+ virtual std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) = 0;
+ // The buffer to store the received bytes.
+ virtual Vector<uint8_t> buffer() = 0;
+ // The number of bytes which were already received.
+ size_t offset() const { return offset_; }
+ void set_offset(size_t value) { offset_ = value; }
+ // A flag to indicate if finishing the streaming decoder is allowed without
+ // error.
+ virtual bool is_finishing_allowed() const { return false; }
+
+ private:
+ size_t offset_ = 0;
+ };
+
+ // Forward declarations of the concrete states. This is needed so that they
+ // can access private members of the AsyncStreamingDecoder.
+ class DecodeVarInt32;
+ class DecodeModuleHeader;
+ class DecodeSectionID;
+ class DecodeSectionLength;
+ class DecodeSectionPayload;
+ class DecodeNumberOfFunctions;
+ class DecodeFunctionLength;
+ class DecodeFunctionBody;
+
+ // Creates a buffer for the next section of the module.
+ SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
+ size_t length,
+ Vector<const uint8_t> length_bytes);
+
+ std::unique_ptr<DecodingState> Error(const WasmError& error) {
+ if (ok()) processor_->OnError(error);
+ Fail();
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ std::unique_ptr<DecodingState> Error(std::string message) {
+ return Error(WasmError{module_offset_ - 1, std::move(message)});
+ }
+
+ void ProcessModuleHeader() {
+ if (!ok()) return;
+ if (!processor_->ProcessModuleHeader(state_->buffer(), 0)) Fail();
+ }
+
+ void ProcessSection(SectionBuffer* buffer) {
+ if (!ok()) return;
+ if (!processor_->ProcessSection(
+ buffer->section_code(), buffer->payload(),
+ buffer->module_offset() +
+ static_cast<uint32_t>(buffer->payload_offset()))) {
+ Fail();
+ }
+ }
+
+ void StartCodeSection(int num_functions,
+ std::shared_ptr<WireBytesStorage> wire_bytes_storage,
+ int code_section_length) {
+ if (!ok()) return;
+ // The offset passed to {ProcessCodeSectionHeader} is an error offset and
+ // not the start offset of a buffer. Therefore we need the -1 here.
+ if (!processor_->ProcessCodeSectionHeader(
+ num_functions, module_offset() - 1, std::move(wire_bytes_storage),
+ code_section_length)) {
+ Fail();
+ }
+ }
+
+ void ProcessFunctionBody(Vector<const uint8_t> bytes,
+ uint32_t module_offset) {
+ if (!ok()) return;
+ if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
+ }
+
+ void Fail() {
+ // We reset the {processor_} field to represent failure. This also ensures
+ // that we do not accidentally call further methods on the processor after
+ // failure.
+ processor_.reset();
+ }
+
+ bool ok() const { return processor_ != nullptr; }
+
+ uint32_t module_offset() const { return module_offset_; }
+
+ std::unique_ptr<StreamingProcessor> processor_;
+ std::unique_ptr<DecodingState> state_;
+ std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
+ bool code_section_processed_ = false;
+ uint32_t module_offset_ = 0;
+ size_t total_size_ = 0;
+
+ // We need wire bytes in an array for deserializing cached modules.
+ std::vector<uint8_t> wire_bytes_for_deserializing_;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncStreamingDecoder);
+};
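+
+The state diagram above describes how AsyncStreamingDecoder advances through DecodingState objects as bytes arrive. A simplified, self-contained sketch of such a byte-driven state machine, using hypothetical State/CollectN/FeedChunk names rather than the V8 classes:
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+struct State {
+  virtual ~State() = default;
+  virtual std::vector<uint8_t>& buffer() = 0;  // bytes this state must collect
+  virtual std::unique_ptr<State> Next() = 0;   // transition once buffer is full
+  size_t offset = 0;                           // bytes received so far
+};
+
+// Feed one incoming chunk to the current state, advancing to the next state
+// whenever the current state's buffer fills up.
+void FeedChunk(std::unique_ptr<State>& state,
+               const std::vector<uint8_t>& chunk) {
+  size_t pos = 0;
+  while (state && pos < chunk.size()) {
+    std::vector<uint8_t>& buf = state->buffer();
+    size_t take = std::min(buf.size() - state->offset, chunk.size() - pos);
+    std::copy_n(chunk.begin() + pos, take, buf.begin() + state->offset);
+    state->offset += take;
+    pos += take;
+    if (state->offset == buf.size()) state = state->Next();
+  }
+}
+
+// A trivial terminal state that just collects a fixed number of bytes.
+struct CollectN : State {
+  explicit CollectN(size_t n) : buf(n) {}
+  std::vector<uint8_t>& buffer() override { return buf; }
+  std::unique_ptr<State> Next() override { return nullptr; }
+  std::vector<uint8_t> buf;
+};
+
+int main() {
+  std::unique_ptr<State> state = std::make_unique<CollectN>(4);
+  FeedChunk(state, {1, 2});
+  FeedChunk(state, {3, 4});  // buffer fills; Next() ends the machine
+}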
+
+void AsyncStreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
if (deserializing()) {
wire_bytes_for_deserializing_.insert(wire_bytes_for_deserializing_.end(),
bytes.begin(), bytes.end());
@@ -50,8 +246,8 @@ void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
}
}
-size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
- Vector<const uint8_t> bytes) {
+size_t AsyncStreamingDecoder::DecodingState::ReadBytes(
+ AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
Vector<uint8_t> remaining_buf = buffer() + offset();
size_t num_bytes = std::min(bytes.size(), remaining_buf.size());
TRACE_STREAMING("ReadBytes(%zu bytes)\n", num_bytes);
@@ -60,7 +256,7 @@ size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
return num_bytes;
}
-void StreamingDecoder::Finish() {
+void AsyncStreamingDecoder::Finish() {
TRACE_STREAMING("Finish\n");
if (!ok()) return;
@@ -82,7 +278,8 @@ void StreamingDecoder::Finish() {
return;
}
- OwnedVector<uint8_t> bytes = OwnedVector<uint8_t>::New(total_size_);
+ OwnedVector<uint8_t> bytes =
+ OwnedVector<uint8_t>::NewForOverwrite(total_size_);
uint8_t* cursor = bytes.start();
{
#define BYTES(x) (x & 0xFF), (x >> 8) & 0xFF, (x >> 16) & 0xFF, (x >> 24) & 0xFF
@@ -99,31 +296,20 @@ void StreamingDecoder::Finish() {
processor_->OnFinishedStream(std::move(bytes));
}
-void StreamingDecoder::Abort() {
+void AsyncStreamingDecoder::Abort() {
TRACE_STREAMING("Abort\n");
if (!ok()) return; // Failed already.
processor_->OnAbort();
Fail();
}
-void StreamingDecoder::SetModuleCompiledCallback(
- ModuleCompiledCallback callback) {
- DCHECK_NULL(module_compiled_callback_);
- module_compiled_callback_ = callback;
-}
-
-bool StreamingDecoder::SetCompiledModuleBytes(
- Vector<const uint8_t> compiled_module_bytes) {
- compiled_module_bytes_ = compiled_module_bytes;
- return true;
-}
-
namespace {
class TopTierCompiledCallback {
public:
- TopTierCompiledCallback(std::weak_ptr<NativeModule> native_module,
- StreamingDecoder::ModuleCompiledCallback callback)
+ TopTierCompiledCallback(
+ std::weak_ptr<NativeModule> native_module,
+ AsyncStreamingDecoder::ModuleCompiledCallback callback)
: native_module_(std::move(native_module)),
callback_(std::move(callback)) {}
@@ -142,7 +328,7 @@ class TopTierCompiledCallback {
private:
const std::weak_ptr<NativeModule> native_module_;
- const StreamingDecoder::ModuleCompiledCallback callback_;
+ const AsyncStreamingDecoder::ModuleCompiledCallback callback_;
#ifdef DEBUG
mutable bool called_ = false;
#endif
@@ -150,7 +336,7 @@ class TopTierCompiledCallback {
} // namespace
-void StreamingDecoder::NotifyNativeModuleCreated(
+void AsyncStreamingDecoder::NotifyNativeModuleCreated(
const std::shared_ptr<NativeModule>& native_module) {
if (!module_compiled_callback_) return;
auto* comp_state = native_module->compilation_state();
@@ -162,20 +348,21 @@ void StreamingDecoder::NotifyNativeModuleCreated(
// An abstract class to share code among the states which decode VarInts. This
// class takes over the decoding of the VarInt and then calls the actual decode
// code with the decoded value.
-class StreamingDecoder::DecodeVarInt32 : public DecodingState {
+class AsyncStreamingDecoder::DecodeVarInt32 : public DecodingState {
public:
explicit DecodeVarInt32(size_t max_value, const char* field_name)
: max_value_(max_value), field_name_(field_name) {}
Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
- size_t ReadBytes(StreamingDecoder* streaming,
+ size_t ReadBytes(AsyncStreamingDecoder* streaming,
Vector<const uint8_t> bytes) override;
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
virtual std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) = 0;
+ AsyncStreamingDecoder* streaming) = 0;
protected:
uint8_t byte_buffer_[kMaxVarInt32Size];
@@ -187,11 +374,12 @@ class StreamingDecoder::DecodeVarInt32 : public DecodingState {
size_t bytes_consumed_ = 0;
};
-class StreamingDecoder::DecodeModuleHeader : public DecodingState {
+class AsyncStreamingDecoder::DecodeModuleHeader : public DecodingState {
public:
Vector<uint8_t> buffer() override { return ArrayVector(byte_buffer_); }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
// Checks if the magic bytes of the module header are correct.
@@ -202,7 +390,7 @@ class StreamingDecoder::DecodeModuleHeader : public DecodingState {
uint8_t byte_buffer_[kModuleHeaderSize];
};
-class StreamingDecoder::DecodeSectionID : public DecodingState {
+class AsyncStreamingDecoder::DecodeSectionID : public DecodingState {
public:
explicit DecodeSectionID(uint32_t module_offset)
: module_offset_(module_offset) {}
@@ -210,7 +398,8 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
Vector<uint8_t> buffer() override { return {&id_, 1}; }
bool is_finishing_allowed() const override { return true; }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
uint8_t id_ = 0;
@@ -218,7 +407,7 @@ class StreamingDecoder::DecodeSectionID : public DecodingState {
const uint32_t module_offset_;
};
-class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
public:
explicit DecodeSectionLength(uint8_t id, uint32_t module_offset)
: DecodeVarInt32(kV8MaxWasmModuleSize, "section length"),
@@ -226,7 +415,7 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
module_offset_(module_offset) {}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
const uint8_t section_id_;
@@ -234,33 +423,34 @@ class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
const uint32_t module_offset_;
};
-class StreamingDecoder::DecodeSectionPayload : public DecodingState {
+class AsyncStreamingDecoder::DecodeSectionPayload : public DecodingState {
public:
explicit DecodeSectionPayload(SectionBuffer* section_buffer)
: section_buffer_(section_buffer) {}
Vector<uint8_t> buffer() override { return section_buffer_->payload(); }
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
};
-class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
public:
explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
: DecodeVarInt32(kV8MaxWasmFunctions, "functions count"),
section_buffer_(section_buffer) {}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
};
-class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
+class AsyncStreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
public:
explicit DecodeFunctionLength(SectionBuffer* section_buffer,
size_t buffer_offset,
@@ -274,7 +464,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
}
std::unique_ptr<DecodingState> NextWithValue(
- StreamingDecoder* streaming) override;
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
@@ -282,7 +472,7 @@ class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
const size_t num_remaining_functions_;
};
-class StreamingDecoder::DecodeFunctionBody : public DecodingState {
+class AsyncStreamingDecoder::DecodeFunctionBody : public DecodingState {
public:
explicit DecodeFunctionBody(SectionBuffer* section_buffer,
size_t buffer_offset, size_t function_body_length,
@@ -300,7 +490,8 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
return remaining_buffer.SubVector(0, function_body_length_);
}
- std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+ std::unique_ptr<DecodingState> Next(
+ AsyncStreamingDecoder* streaming) override;
private:
SectionBuffer* const section_buffer_;
@@ -310,8 +501,8 @@ class StreamingDecoder::DecodeFunctionBody : public DecodingState {
const uint32_t module_offset_;
};
-size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
- StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
+size_t AsyncStreamingDecoder::DecodeVarInt32::ReadBytes(
+ AsyncStreamingDecoder* streaming, Vector<const uint8_t> bytes) {
Vector<uint8_t> buf = buffer();
Vector<uint8_t> remaining_buf = buf + offset();
size_t new_bytes = std::min(bytes.size(), remaining_buf.size());
@@ -344,8 +535,8 @@ size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
return new_bytes;
}
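
DecodeVarInt32 reads the section-length and count fields one byte at a time as chunks arrive. A minimal sketch of unsigned LEB128 decoding under the same five-byte limit (kMaxVarInt32Size), using a hypothetical DecodeVarUint32 helper rather than V8's Decoder class:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Each byte contributes 7 payload bits; the high bit signals continuation.
// Returns nullopt if the value is incomplete or longer than 5 bytes.
std::optional<uint32_t> DecodeVarUint32(const std::vector<uint8_t>& bytes,
                                        size_t* length_out) {
  uint32_t result = 0;
  for (size_t i = 0; i < bytes.size() && i < 5; ++i) {
    result |= static_cast<uint32_t>(bytes[i] & 0x7F) << (7 * i);
    if ((bytes[i] & 0x80) == 0) {  // No continuation bit: value is complete.
      *length_out = i + 1;
      return result;
    }
  }
  return std::nullopt;
}

int main() {
  size_t len = 0;
  auto value = DecodeVarUint32({0xE5, 0x8E, 0x26}, &len);  // 624485 in LEB128
  return (value && *value == 624485 && len == 3) ? 0 : 1;
}
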
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeVarInt32::Next(AsyncStreamingDecoder* streaming) {
if (!streaming->ok()) return nullptr;
if (value_ > max_value_) {
@@ -358,16 +549,17 @@ StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
return NextWithValue(streaming);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeModuleHeader::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeModuleHeader\n");
streaming->ProcessModuleHeader();
if (!streaming->ok()) return nullptr;
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionID::Next(AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionID: %s section\n",
SectionName(static_cast<SectionCode>(id_)));
if (id_ == SectionCode::kCodeSectionCode) {
@@ -383,9 +575,9 @@ StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
return std::make_unique<DecodeSectionLength>(id_, module_offset_);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionLength::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionLength::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionLength(%zu)\n", value_);
SectionBuffer* buf =
streaming->CreateNewBuffer(module_offset_, section_id_, value_,
@@ -410,17 +602,18 @@ StreamingDecoder::DecodeSectionLength::NextWithValue(
return std::make_unique<DecodeSectionPayload>(buf);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeSectionPayload::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeSectionPayload\n");
streaming->ProcessSection(section_buffer_);
if (!streaming->ok()) return nullptr;
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeNumberOfFunctions(%zu)\n", value_);
// Copy the bytes we read into the section buffer.
Vector<uint8_t> payload_buf = section_buffer_->payload();
@@ -449,9 +642,9 @@ StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
value_);
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeFunctionLength::NextWithValue(
- StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeFunctionLength::NextWithValue(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionLength(%zu)\n", value_);
// Copy the bytes we consumed into the section buffer.
Vector<uint8_t> fun_length_buffer = section_buffer_->bytes() + buffer_offset_;
@@ -472,8 +665,9 @@ StreamingDecoder::DecodeFunctionLength::NextWithValue(
num_remaining_functions_, streaming->module_offset());
}
-std::unique_ptr<StreamingDecoder::DecodingState>
-StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
+std::unique_ptr<AsyncStreamingDecoder::DecodingState>
+AsyncStreamingDecoder::DecodeFunctionBody::Next(
+ AsyncStreamingDecoder* streaming) {
TRACE_STREAMING("DecodeFunctionBody\n");
streaming->ProcessFunctionBody(buffer(), module_offset_);
if (!streaming->ok()) return nullptr;
@@ -490,13 +684,13 @@ StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
return std::make_unique<DecodeSectionID>(streaming->module_offset());
}
-StreamingDecoder::StreamingDecoder(
+AsyncStreamingDecoder::AsyncStreamingDecoder(
std::unique_ptr<StreamingProcessor> processor)
: processor_(std::move(processor)),
// A module always starts with a module header.
state_(new DecodeModuleHeader()) {}
-StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
+AsyncStreamingDecoder::SectionBuffer* AsyncStreamingDecoder::CreateNewBuffer(
uint32_t module_offset, uint8_t section_id, size_t length,
Vector<const uint8_t> length_bytes) {
// Section buffers are allocated in the same order they appear in the module,
@@ -506,6 +700,11 @@ StreamingDecoder::SectionBuffer* StreamingDecoder::CreateNewBuffer(
return section_buffers_.back().get();
}
+std::unique_ptr<StreamingDecoder> StreamingDecoder::CreateAsyncStreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor) {
+ return std::make_unique<AsyncStreamingDecoder>(std::move(processor));
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/streaming-decoder.h b/chromium/v8/src/wasm/streaming-decoder.h
index f3203e70274..bdf3218d1ef 100644
--- a/chromium/v8/src/wasm/streaming-decoder.h
+++ b/chromium/v8/src/wasm/streaming-decoder.h
@@ -12,6 +12,7 @@
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/wasm-constants.h"
+#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -66,220 +67,57 @@ class V8_EXPORT_PRIVATE StreamingProcessor {
// and function bodies.
class V8_EXPORT_PRIVATE StreamingDecoder {
public:
- explicit StreamingDecoder(std::unique_ptr<StreamingProcessor> processor);
+ virtual ~StreamingDecoder() = default;
// The buffer passed into OnBytesReceived is owned by the caller.
- void OnBytesReceived(Vector<const uint8_t> bytes);
+ virtual void OnBytesReceived(Vector<const uint8_t> bytes) = 0;
- void Finish();
+ virtual void Finish() = 0;
- void Abort();
+ virtual void Abort() = 0;
// Notify the StreamingDecoder that compilation ended and the
// StreamingProcessor should not be called anymore.
- void NotifyCompilationEnded() { Fail(); }
+ virtual void NotifyCompilationEnded() = 0;
// Caching support.
// Sets the callback that is called after the module is fully compiled.
using ModuleCompiledCallback =
std::function<void(const std::shared_ptr<NativeModule>&)>;
- void SetModuleCompiledCallback(ModuleCompiledCallback callback);
- // Passes previously compiled module bytes from the embedder's cache.
- bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes);
-
- void NotifyNativeModuleCreated(
- const std::shared_ptr<NativeModule>& native_module);
-
- Vector<const char> url() { return VectorOf(url_); }
- void SetUrl(Vector<const char> url) {
- url_.assign(url.begin(), url.length());
- }
-
- private:
- // TODO(ahaas): Put the whole private state of the StreamingDecoder into the
- // cc file (PIMPL design pattern).
-
- // The SectionBuffer is the data object for the content of a single section.
- // It stores all bytes of the section (including section id and section
- // length), and the offset where the actual payload starts.
- class SectionBuffer : public WireBytesStorage {
- public:
- // id: The section id.
- // payload_length: The length of the payload.
- // length_bytes: The section length, as it is encoded in the module bytes.
- SectionBuffer(uint32_t module_offset, uint8_t id, size_t payload_length,
- Vector<const uint8_t> length_bytes)
- : // ID + length + payload
- module_offset_(module_offset),
- bytes_(OwnedVector<uint8_t>::New(1 + length_bytes.length() +
- payload_length)),
- payload_offset_(1 + length_bytes.length()) {
- bytes_.start()[0] = id;
- memcpy(bytes_.start() + 1, &length_bytes.first(), length_bytes.length());
- }
-
- SectionCode section_code() const {
- return static_cast<SectionCode>(bytes_.start()[0]);
- }
-
- Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
- DCHECK_LE(module_offset_, ref.offset());
- uint32_t offset_in_code_buffer = ref.offset() - module_offset_;
- return bytes().SubVector(offset_in_code_buffer,
- offset_in_code_buffer + ref.length());
- }
-
- uint32_t module_offset() const { return module_offset_; }
- Vector<uint8_t> bytes() const { return bytes_.as_vector(); }
- Vector<uint8_t> payload() const { return bytes() + payload_offset_; }
- size_t length() const { return bytes_.size(); }
- size_t payload_offset() const { return payload_offset_; }
-
- private:
- const uint32_t module_offset_;
- const OwnedVector<uint8_t> bytes_;
- const size_t payload_offset_;
- };
-
- // The decoding of a stream of wasm module bytes is organized in states. Each
- // state provides a buffer to store the bytes required for the current state,
- // information on how many bytes have already been received, how many bytes
- // are needed, and a {Next} function which starts the next state once all
- // bytes of the current state were received.
- //
- // The states change according to the following state diagram:
- //
- // Start
- // |
- // |
- // v
- // DecodeModuleHeader
- // | _________________________________________
- // | | |
- // v v |
- // DecodeSectionID --> DecodeSectionLength --> DecodeSectionPayload
- // A |
- // | | (if the section id == code)
- // | v
- // | DecodeNumberOfFunctions -- > DecodeFunctionLength
- // | A |
- // | | |
- // | (after all functions were read) | v
- // ------------------------------------- DecodeFunctionBody
- //
- class DecodingState {
- public:
- virtual ~DecodingState() = default;
-
- // Reads the bytes for the current state and returns the number of read
- // bytes.
- virtual size_t ReadBytes(StreamingDecoder* streaming,
- Vector<const uint8_t> bytes);
-
- // Returns the next state of the streaming decoding.
- virtual std::unique_ptr<DecodingState> Next(
- StreamingDecoder* streaming) = 0;
- // The buffer to store the received bytes.
- virtual Vector<uint8_t> buffer() = 0;
- // The number of bytes which were already received.
- size_t offset() const { return offset_; }
- void set_offset(size_t value) { offset_ = value; }
- // A flag to indicate if finishing the streaming decoder is allowed without
- // error.
- virtual bool is_finishing_allowed() const { return false; }
- private:
- size_t offset_ = 0;
- };
-
- // Forward declarations of the concrete states. This is needed so that they
- // can access private members of the StreamingDecoder.
- class DecodeVarInt32;
- class DecodeModuleHeader;
- class DecodeSectionID;
- class DecodeSectionLength;
- class DecodeSectionPayload;
- class DecodeNumberOfFunctions;
- class DecodeFunctionLength;
- class DecodeFunctionBody;
-
- // Creates a buffer for the next section of the module.
- SectionBuffer* CreateNewBuffer(uint32_t module_offset, uint8_t section_id,
- size_t length,
- Vector<const uint8_t> length_bytes);
-
- std::unique_ptr<DecodingState> Error(const WasmError& error) {
- if (ok()) processor_->OnError(error);
- Fail();
- return std::unique_ptr<DecodingState>(nullptr);
- }
-
- std::unique_ptr<DecodingState> Error(std::string message) {
- return Error(WasmError{module_offset_ - 1, std::move(message)});
- }
-
- void ProcessModuleHeader() {
- if (!ok()) return;
- if (!processor_->ProcessModuleHeader(state_->buffer(), 0)) Fail();
+ void SetModuleCompiledCallback(ModuleCompiledCallback callback) {
+ module_compiled_callback_ = callback;
}
- void ProcessSection(SectionBuffer* buffer) {
- if (!ok()) return;
- if (!processor_->ProcessSection(
- buffer->section_code(), buffer->payload(),
- buffer->module_offset() +
- static_cast<uint32_t>(buffer->payload_offset()))) {
- Fail();
- }
+ // Passes previously compiled module bytes from the embedder's cache.
+ bool SetCompiledModuleBytes(Vector<const uint8_t> compiled_module_bytes) {
+ compiled_module_bytes_ = compiled_module_bytes;
+ return true;
}
- void StartCodeSection(int num_functions,
- std::shared_ptr<WireBytesStorage> wire_bytes_storage,
- int code_section_length) {
- if (!ok()) return;
- // The offset passed to {ProcessCodeSectionHeader} is an error offset and
- // not the start offset of a buffer. Therefore we need the -1 here.
- if (!processor_->ProcessCodeSectionHeader(
- num_functions, module_offset() - 1, std::move(wire_bytes_storage),
- code_section_length)) {
- Fail();
- }
- }
+ virtual void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>& native_module) = 0;
- void ProcessFunctionBody(Vector<const uint8_t> bytes,
- uint32_t module_offset) {
- if (!ok()) return;
- if (!processor_->ProcessFunctionBody(bytes, module_offset)) Fail();
- }
+ Vector<const char> url() { return VectorOf(url_); }
- void Fail() {
- // We reset the {processor_} field to represent failure. This also ensures
- // that we do not accidentally call further methods on the processor after
- // failure.
- processor_.reset();
+ void SetUrl(Vector<const char> url) {
+ url_.assign(url.begin(), url.length());
}
- bool ok() const { return processor_ != nullptr; }
+ static std::unique_ptr<StreamingDecoder> CreateAsyncStreamingDecoder(
+ std::unique_ptr<StreamingProcessor> processor);
- uint32_t module_offset() const { return module_offset_; }
+ static std::unique_ptr<StreamingDecoder> CreateSyncStreamingDecoder(
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver);
+ protected:
bool deserializing() const { return !compiled_module_bytes_.empty(); }
- std::unique_ptr<StreamingProcessor> processor_;
- std::unique_ptr<DecodingState> state_;
- std::vector<std::shared_ptr<SectionBuffer>> section_buffers_;
- bool code_section_processed_ = false;
- uint32_t module_offset_ = 0;
- size_t total_size_ = 0;
std::string url_;
-
- // Caching support.
- ModuleCompiledCallback module_compiled_callback_ = nullptr;
- // We need wire bytes in an array for deserializing cached modules.
- std::vector<uint8_t> wire_bytes_for_deserializing_;
+ ModuleCompiledCallback module_compiled_callback_;
Vector<const uint8_t> compiled_module_bytes_;
-
- DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
};
} // namespace wasm
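
The header now exposes StreamingDecoder as an abstract interface with static factory functions, hiding the asynchronous and synchronous implementations in their respective .cc files. A simplified sketch of that pattern with hypothetical Decoder/AsyncImpl/SyncImpl names, not the V8 classes:

#include <cstdint>
#include <memory>
#include <vector>

class Decoder {
 public:
  virtual ~Decoder() = default;
  virtual void OnBytesReceived(const std::vector<uint8_t>& bytes) = 0;
  virtual void Finish() = 0;

  // Factories return concrete implementations defined below (or, as in the
  // real code, in separate translation units).
  static std::unique_ptr<Decoder> CreateAsync();
  static std::unique_ptr<Decoder> CreateSync();
};

class AsyncImpl final : public Decoder {
 public:
  void OnBytesReceived(const std::vector<uint8_t>&) override { /* stream */ }
  void Finish() override {}
};

class SyncImpl final : public Decoder {
 public:
  void OnBytesReceived(const std::vector<uint8_t>& bytes) override {
    buffered_.insert(buffered_.end(), bytes.begin(), bytes.end());
  }
  void Finish() override { /* compile buffered_ synchronously */ }

 private:
  std::vector<uint8_t> buffered_;
};

std::unique_ptr<Decoder> Decoder::CreateAsync() {
  return std::make_unique<AsyncImpl>();
}
std::unique_ptr<Decoder> Decoder::CreateSync() {
  return std::make_unique<SyncImpl>();
}

int main() {
  auto decoder = Decoder::CreateSync();
  decoder->OnBytesReceived({0x00, 0x61, 0x73, 0x6D});  // "\0asm" magic
  decoder->Finish();
}
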
diff --git a/chromium/v8/src/wasm/struct-types.h b/chromium/v8/src/wasm/struct-types.h
index 6cd4271c24b..cee563b89f4 100644
--- a/chromium/v8/src/wasm/struct-types.h
+++ b/chromium/v8/src/wasm/struct-types.h
@@ -18,8 +18,11 @@ namespace wasm {
class StructType : public ZoneObject {
public:
StructType(uint32_t field_count, uint32_t* field_offsets,
- const ValueType* reps)
- : field_count_(field_count), field_offsets_(field_offsets), reps_(reps) {
+ const ValueType* reps, const bool* mutabilities)
+ : field_count_(field_count),
+ field_offsets_(field_offsets),
+ reps_(reps),
+ mutabilities_(mutabilities) {
InitializeOffsets();
}
@@ -30,15 +33,26 @@ class StructType : public ZoneObject {
return reps_[index];
}
+ bool mutability(uint32_t index) const {
+ DCHECK_LT(index, field_count_);
+ return mutabilities_[index];
+ }
+
// Iteration support.
base::iterator_range<const ValueType*> fields() const {
return {reps_, reps_ + field_count_};
}
+ base::iterator_range<const bool*> mutabilities() const {
+ return {mutabilities_, mutabilities_ + field_count_};
+ }
bool operator==(const StructType& other) const {
if (this == &other) return true;
if (field_count() != other.field_count()) return false;
- return std::equal(fields().begin(), fields().end(), other.fields().begin());
+ return std::equal(fields().begin(), fields().end(),
+ other.fields().begin()) &&
+ std::equal(mutabilities().begin(), mutabilities().end(),
+ other.mutabilities().begin());
}
bool operator!=(const StructType& other) const { return !(*this == other); }
@@ -70,17 +84,20 @@ class StructType : public ZoneObject {
: field_count_(field_count),
zone_(zone),
cursor_(0),
- buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))) {}
+ buffer_(zone->NewArray<ValueType>(static_cast<int>(field_count))),
+ mutabilities_(zone->NewArray<bool>(static_cast<int>(field_count))) {}
- void AddField(ValueType type) {
+ void AddField(ValueType type, bool mutability) {
DCHECK_LT(cursor_, field_count_);
+ mutabilities_[cursor_] = mutability;
buffer_[cursor_++] = type;
}
StructType* Build() {
DCHECK_EQ(cursor_, field_count_);
uint32_t* offsets = zone_->NewArray<uint32_t>(field_count_);
- return new (zone_) StructType(field_count_, offsets, buffer_);
+ return new (zone_)
+ StructType(field_count_, offsets, buffer_, mutabilities_);
}
private:
@@ -88,25 +105,30 @@ class StructType : public ZoneObject {
Zone* zone_;
uint32_t cursor_;
ValueType* buffer_;
+ bool* mutabilities_;
};
private:
uint32_t field_count_;
uint32_t* field_offsets_;
const ValueType* reps_;
+ const bool* mutabilities_;
};
class ArrayType : public ZoneObject {
public:
- constexpr explicit ArrayType(ValueType rep) : rep_(rep) {}
+ constexpr explicit ArrayType(ValueType rep, bool mutability)
+ : rep_(rep), mutability_(mutability) {}
ValueType element_type() const { return rep_; }
+ bool mutability() const { return mutability_; }
bool operator==(const ArrayType& other) const { return rep_ == other.rep_; }
bool operator!=(const ArrayType& other) const { return rep_ != other.rep_; }
private:
const ValueType rep_;
+ const bool mutability_;
};
} // namespace wasm
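
StructType now records per-field mutability in an array parallel to the field types, and StructType::Builder::AddField takes the mutability flag alongside the type. A simplified sketch of this parallel-array builder pattern, using stand-in types rather than V8's Zone-allocated arrays:

#include <cassert>
#include <vector>

enum class FieldType { kI32, kI64, kF32, kF64 };

struct StructDesc {
  std::vector<FieldType> types;
  std::vector<bool> mutabilities;  // parallel to |types|
};

class StructBuilder {
 public:
  void AddField(FieldType type, bool mutability) {
    desc_.types.push_back(type);
    desc_.mutabilities.push_back(mutability);
  }
  StructDesc Build() { return desc_; }

 private:
  StructDesc desc_;
};

int main() {
  StructBuilder builder;
  builder.AddField(FieldType::kI32, /*mutability=*/false);
  builder.AddField(FieldType::kF64, /*mutability=*/true);
  StructDesc s = builder.Build();
  assert(s.types.size() == s.mutabilities.size());
  assert(s.mutabilities[1]);  // second field is mutable
}
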
diff --git a/chromium/v8/src/wasm/sync-streaming-decoder.cc b/chromium/v8/src/wasm/sync-streaming-decoder.cc
new file mode 100644
index 00000000000..7152806d9d9
--- /dev/null
+++ b/chromium/v8/src/wasm/sync-streaming-decoder.cc
@@ -0,0 +1,112 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate.h"
+#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-serialization.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class V8_EXPORT_PRIVATE SyncStreamingDecoder : public StreamingDecoder {
+ public:
+ SyncStreamingDecoder(Isolate* isolate, const WasmFeatures& enabled,
+ Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver)
+ : isolate_(isolate),
+ enabled_(enabled),
+ context_(context),
+ api_method_name_for_errors_(api_method_name_for_errors),
+ resolver_(resolver) {}
+
+ // The buffer passed into OnBytesReceived is owned by the caller.
+ void OnBytesReceived(Vector<const uint8_t> bytes) override {
+ buffer_.emplace_back(bytes.size());
+ CHECK_EQ(buffer_.back().size(), bytes.size());
+ std::memcpy(buffer_.back().data(), bytes.data(), bytes.size());
+ buffer_size_ += bytes.size();
+ }
+
+ void Finish() override {
+ // We copy all received chunks into one byte buffer.
+ auto bytes = std::make_unique<uint8_t[]>(buffer_size_);
+ uint8_t* destination = bytes.get();
+ for (auto& chunk : buffer_) {
+ std::memcpy(destination, chunk.data(), chunk.size());
+ destination += chunk.size();
+ }
+ CHECK_EQ(destination - bytes.get(), buffer_size_);
+
+ // Check if we can deserialize the module from cache.
+ if (deserializing()) {
+ HandleScope scope(isolate_);
+ SaveAndSwitchContext saved_context(isolate_, *context_);
+
+ MaybeHandle<WasmModuleObject> module_object = DeserializeNativeModule(
+ isolate_, compiled_module_bytes_,
+ Vector<const uint8_t>(bytes.get(), buffer_size_), url());
+
+ if (!module_object.is_null()) {
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ resolver_->OnCompilationSucceeded(module);
+ return;
+ }
+ }
+
+ // Compile the received bytes synchronously.
+ ModuleWireBytes wire_bytes(bytes.get(), bytes.get() + buffer_size_);
+ ErrorThrower thrower(isolate_, api_method_name_for_errors_);
+ MaybeHandle<WasmModuleObject> module_object =
+ isolate_->wasm_engine()->SyncCompile(isolate_, enabled_, &thrower,
+ wire_bytes);
+ if (thrower.error()) {
+ resolver_->OnCompilationFailed(thrower.Reify());
+ return;
+ }
+ Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+ if (module_compiled_callback_) {
+ module_compiled_callback_(module->shared_native_module());
+ }
+ resolver_->OnCompilationSucceeded(module);
+ }
+
+ void Abort() override {
+ // Abort is fully handled by the API, we only clear the buffer.
+ buffer_.clear();
+ }
+
+ void NotifyCompilationEnded() override { buffer_.clear(); }
+
+ void NotifyNativeModuleCreated(
+ const std::shared_ptr<NativeModule>&) override {
+ // This function is only called from the {AsyncCompileJob}.
+ UNREACHABLE();
+ }
+
+ private:
+ Isolate* isolate_;
+ const WasmFeatures enabled_;
+ Handle<Context> context_;
+ const char* api_method_name_for_errors_;
+ std::shared_ptr<CompilationResultResolver> resolver_;
+
+ std::vector<std::vector<uint8_t>> buffer_;
+ size_t buffer_size_ = 0;
+};
+
+std::unique_ptr<StreamingDecoder> StreamingDecoder::CreateSyncStreamingDecoder(
+ Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
+ const char* api_method_name_for_errors,
+ std::shared_ptr<CompilationResultResolver> resolver) {
+ return std::make_unique<SyncStreamingDecoder>(isolate, enabled, context,
+ api_method_name_for_errors,
+ std::move(resolver));
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
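
SyncStreamingDecoder::Finish first copies all received chunks back-to-back into one contiguous buffer before deserializing or compiling. A minimal sketch of that concatenation step, with a hypothetical Concatenate helper:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

// Copy every chunk, in order, into a single owned buffer of total_size bytes.
std::unique_ptr<uint8_t[]> Concatenate(
    const std::vector<std::vector<uint8_t>>& chunks, size_t total_size) {
  auto bytes = std::make_unique<uint8_t[]>(total_size);
  uint8_t* destination = bytes.get();
  for (const auto& chunk : chunks) {
    std::memcpy(destination, chunk.data(), chunk.size());
    destination += chunk.size();
  }
  assert(static_cast<size_t>(destination - bytes.get()) == total_size);
  return bytes;
}

int main() {
  std::vector<std::vector<uint8_t>> chunks = {{1, 2, 3}, {4, 5}};
  auto flat = Concatenate(chunks, 5);
  assert(flat[4] == 5);
}
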
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index 357dafbe2c7..3189629103a 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -20,44 +20,40 @@ namespace wasm {
// Type for holding simd values, defined in wasm-value.h.
class Simd128;
-// Type lattice: Given a fixed struct type S, the following lattice
-// defines the subtyping relation among types:
-// For every two types connected by a line, the top type is a
-// (direct) subtype of the bottom type.
-//
-// AnyRef
-// / \
-// / EqRef
-// / / \
-// FuncRef ExnRef OptRef(S)
-// \ | / \
-// I32 I64 F32 F64 NullRef Ref(S)
-// \ \ \ \ | /
-// ---------------------- Bottom ---------
// Format: kind, log2Size, code, machineType, shortName, typeName
//
// Some of these types are from proposals that are not standardized yet:
-// - "ref" types per https://github.com/WebAssembly/function-references
-// - "optref"/"eqref" per https://github.com/WebAssembly/gc
-//
-// TODO(7748): Extend this with struct and function subtyping.
-// Keep up to date with funcref vs. anyref subtyping.
-#define FOREACH_VALUE_TYPE(V) \
- V(Stmt, -1, Void, None, 'v', "<stmt>") \
- V(I32, 2, I32, Int32, 'i', "i32") \
- V(I64, 3, I64, Int64, 'l', "i64") \
- V(F32, 2, F32, Float32, 'f', "f32") \
- V(F64, 3, F64, Float64, 'd', "f64") \
- V(S128, 4, S128, Simd128, 's', "s128") \
- V(AnyRef, kSystemPointerSizeLog2, AnyRef, TaggedPointer, 'r', "anyref") \
- V(FuncRef, kSystemPointerSizeLog2, FuncRef, TaggedPointer, 'a', "funcref") \
- V(NullRef, kSystemPointerSizeLog2, NullRef, TaggedPointer, 'n', "nullref") \
- V(ExnRef, kSystemPointerSizeLog2, ExnRef, TaggedPointer, 'e', "exn") \
- V(Ref, kSystemPointerSizeLog2, Ref, TaggedPointer, '*', "ref") \
- V(OptRef, kSystemPointerSizeLog2, OptRef, TaggedPointer, 'o', "optref") \
- V(EqRef, kSystemPointerSizeLog2, EqRef, TaggedPointer, 'q', "eqref") \
+// - "ref"/"optref" (a.k.a. "ref null") per
+// https://github.com/WebAssembly/function-references
+// - "rtt" per https://github.com/WebAssembly/gc
+#define FOREACH_VALUE_TYPE(V) \
+ V(Stmt, -1, Void, None, 'v', "<stmt>") \
+ V(I32, 2, I32, Int32, 'i', "i32") \
+ V(I64, 3, I64, Int64, 'l', "i64") \
+ V(F32, 2, F32, Float32, 'f', "f32") \
+ V(F64, 3, F64, Float64, 'd', "f64") \
+ V(S128, 4, S128, Simd128, 's', "s128") \
+ V(I8, 0, I8, Int8, 'b', "i8") \
+ V(I16, 1, I16, Int16, 'h', "i16") \
+ V(Rtt, kSystemPointerSizeLog2, Rtt, TaggedPointer, 't', "rtt") \
+ V(Ref, kSystemPointerSizeLog2, Ref, TaggedPointer, 'r', "ref") \
+ V(OptRef, kSystemPointerSizeLog2, OptRef, TaggedPointer, 'n', "ref null") \
V(Bottom, -1, Void, None, '*', "<bot>")
+enum HeapType : uint32_t {
+ kHeapFunc = kV8MaxWasmTypes, // shorthand: c
+ kHeapExtern, // shorthand: e
+ kHeapEq, // shorthand: q
+ kHeapExn // shorthand: x
+};
+enum Nullability : bool { kNonNullable, kNullable };
+
+V8_INLINE constexpr bool is_generic_heap_type(HeapType ht) {
+ STATIC_ASSERT(kHeapExtern >= kHeapFunc && kHeapEq >= kHeapFunc &&
+ kHeapExn >= kHeapFunc);
+ return ht >= kHeapFunc;
+}
+
class ValueType {
public:
enum Kind : uint8_t {
@@ -66,37 +62,68 @@ class ValueType {
#undef DEF_ENUM
};
- constexpr bool has_immediate() const {
- return kind() == kRef || kind() == kOptRef;
+ constexpr bool is_reference_type() const {
+ return kind() == kRef || kind() == kOptRef || kind() == kRtt;
+ }
+
+ constexpr bool is_packed() const { return kind() == kI8 || kind() == kI16; }
+
+ constexpr bool is_nullable() const { return kind() == kOptRef; }
+
+ constexpr bool is_reference_to(HeapType htype) const {
+ return (kind() == kRef || kind() == kOptRef) && heap_type() == htype;
+ }
+
+ constexpr ValueType Unpacked() const {
+ return is_packed() ? Primitive(kI32) : *this;
+ }
+
+ constexpr bool has_index() const {
+ return is_reference_type() && !is_generic_heap_type(heap_type());
}
+ constexpr bool has_depth() const { return kind() == kRtt; }
constexpr ValueType() : bit_field_(KindField::encode(kStmt)) {}
- explicit constexpr ValueType(Kind kind)
- : bit_field_(KindField::encode(kind)) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(!has_immediate());
-#endif
+ static constexpr ValueType Primitive(Kind kind) {
+ CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
+ return ValueType(KindField::encode(kind));
}
- constexpr ValueType(Kind kind, uint32_t ref_index)
- : bit_field_(KindField::encode(kind) | RefIndexField::encode(ref_index)) {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(has_immediate());
-#endif
+ static constexpr ValueType Ref(HeapType heap_type, Nullability nullability) {
+ return ValueType(
+ KindField::encode(nullability == kNullable ? kOptRef : kRef) |
+ HeapTypeField::encode(heap_type));
+ }
+
+ static constexpr ValueType Rtt(HeapType heap_type,
+ uint8_t inheritance_depth) {
+ return ValueType(KindField::encode(kRtt) |
+ HeapTypeField::encode(heap_type) |
+ DepthField::encode(inheritance_depth));
+ }
+
+ static constexpr ValueType FromRawBitField(uint32_t bit_field) {
+ return ValueType(bit_field);
}
constexpr Kind kind() const { return KindField::decode(bit_field_); }
+ constexpr HeapType heap_type() const {
+ CONSTEXPR_DCHECK(is_reference_type());
+ return HeapTypeField::decode(bit_field_);
+ }
+ constexpr uint8_t depth() const {
+ CONSTEXPR_DCHECK(has_depth());
+ return DepthField::decode(bit_field_);
+ }
constexpr uint32_t ref_index() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK(has_immediate());
-#endif
- return RefIndexField::decode(bit_field_);
+ CONSTEXPR_DCHECK(has_index());
+ return static_cast<uint32_t>(heap_type());
}
+ constexpr uint32_t raw_bit_field() const { return bit_field_; }
+
constexpr int element_size_log2() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kStmt, kind());
- DCHECK_NE(kBottom, kind());
-#endif
+ CONSTEXPR_DCHECK(kStmt != kind());
+ CONSTEXPR_DCHECK(kBottom != kind());
constexpr int kElementSizeLog2[] = {
#define ELEM_SIZE_LOG2(kind, log2Size, ...) log2Size,
@@ -116,59 +143,8 @@ class ValueType {
return bit_field_ != other.bit_field_;
}
- // TODO(7748): Extend this with struct and function subtyping.
- // Keep up to date with funcref vs. anyref subtyping.
- constexpr bool IsSubTypeOf(ValueType other) const {
- return (*this == other) || (other.kind() == kAnyRef && IsReferenceType()) ||
- (kind() == kNullRef && other.kind() != kRef &&
- other.IsReferenceType()) ||
- (other.kind() == kEqRef &&
- (kind() == kExnRef || kind() == kOptRef || kind() == kRef)) ||
- (kind() == kRef && other.kind() == kOptRef &&
- ref_index() == other.ref_index());
- }
-
- constexpr bool IsReferenceType() const {
- return kind() == kAnyRef || kind() == kFuncRef || kind() == kNullRef ||
- kind() == kExnRef || kind() == kRef || kind() == kOptRef ||
- kind() == kEqRef;
- }
-
- // TODO(7748): Extend this with struct and function subtyping.
- // Keep up to date with funcref vs. anyref subtyping.
- static ValueType CommonSubType(ValueType a, ValueType b) {
- if (a == b) return a;
- // The only sub type of any value type is {bot}.
- if (!a.IsReferenceType() || !b.IsReferenceType()) {
- return ValueType(kBottom);
- }
- if (a.IsSubTypeOf(b)) return a;
- if (b.IsSubTypeOf(a)) return b;
- // {a} and {b} are not each other's subtype.
- // If one of them is not nullable, their greatest subtype is bottom,
- // otherwise null.
- if (a.kind() == kRef || b.kind() == kRef) return ValueType(kBottom);
- return ValueType(kNullRef);
- }
-
- constexpr ValueTypeCode value_type_code() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kBottom, kind());
-#endif
-
- constexpr ValueTypeCode kValueTypeCode[] = {
-#define TYPE_CODE(kind, log2Size, code, ...) kLocal##code,
- FOREACH_VALUE_TYPE(TYPE_CODE)
-#undef TYPE_CODE
- };
-
- return kValueTypeCode[kind()];
- }
-
constexpr MachineType machine_type() const {
-#if V8_HAS_CXX14_CONSTEXPR
- DCHECK_NE(kBottom, kind());
-#endif
+ CONSTEXPR_DCHECK(kBottom != kind());
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
@@ -184,22 +160,70 @@ class ValueType {
return machine_type().representation();
}
+ constexpr ValueTypeCode value_type_code() const {
+ CONSTEXPR_DCHECK(kind() != kBottom);
+ switch (kind()) {
+ case kOptRef:
+ switch (heap_type()) {
+ case kHeapFunc:
+ return kLocalFuncRef;
+ case kHeapExtern:
+ return kLocalExternRef;
+ case kHeapEq:
+ return kLocalEqRef;
+ case kHeapExn:
+ return kLocalExnRef;
+ default:
+ return kLocalOptRef;
+ }
+ case kRef:
+ return kLocalRef;
+ case kStmt:
+ return kLocalVoid;
+ case kRtt:
+ return kLocalRtt;
+ default:
+ return static_cast<ValueTypeCode>(kLocalI32 - (kind() - kI32));
+ }
+ }
+
+ constexpr bool encoding_needs_heap_type() const {
+ return kind() == kRef || kind() == kRtt ||
+ (kind() == kOptRef && !is_generic_heap_type(heap_type()));
+ }
+
+ constexpr uint32_t heap_type_code() const {
+ CONSTEXPR_DCHECK(encoding_needs_heap_type());
+ switch (heap_type()) {
+ case kHeapFunc:
+ return kLocalFuncRef;
+ case kHeapExn:
+ return kLocalExnRef;
+ case kHeapExtern:
+ return kLocalExternRef;
+ case kHeapEq:
+ return kLocalEqRef;
+ default:
+ return static_cast<uint32_t>(heap_type());
+ }
+ }
+
static ValueType For(MachineType type) {
switch (type.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
- return ValueType(kI32);
+ return Primitive(kI32);
case MachineRepresentation::kWord64:
- return ValueType(kI64);
+ return Primitive(kI64);
case MachineRepresentation::kFloat32:
- return ValueType(kF32);
+ return Primitive(kF32);
case MachineRepresentation::kFloat64:
- return ValueType(kF64);
+ return Primitive(kF64);
case MachineRepresentation::kTaggedPointer:
- return ValueType(kAnyRef);
+ return Ref(kHeapExtern, kNullable);
case MachineRepresentation::kSimd128:
- return ValueType(kS128);
+ return Primitive(kS128);
default:
UNREACHABLE();
}
@@ -215,20 +239,62 @@ class ValueType {
return kShortName[kind()];
}
- constexpr const char* type_name() const {
+ const std::string type_name() const {
+ std::ostringstream buf;
+ switch (kind()) {
+ case kRef:
+ buf << "(ref " << heap_name() << ")";
+ break;
+ case kOptRef:
+ if (is_generic_heap_type(heap_type())) {
+ // We prefer the shorthand to be backwards-compatible with previous
+ // proposals.
+ buf << heap_name() << "ref";
+ } else {
+ buf << "(ref null " << heap_name() << ")";
+ }
+ break;
+ case kRtt:
+        buf << "(rtt " << depth() << " " << heap_name() << ")";
+ break;
+ default:
+ buf << kind_name();
+ }
+ return buf.str();
+ }
+
+ private:
+ using KindField = base::BitField<Kind, 0, 5>;
+ using HeapTypeField = base::BitField<HeapType, 5, 20>;
+ using DepthField = base::BitField<uint8_t, 25, 7>;
+
+ constexpr explicit ValueType(uint32_t bit_field) : bit_field_(bit_field) {}
+
+ constexpr const char* kind_name() const {
constexpr const char* kTypeName[] = {
-#define TYPE_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
+#define KIND_NAME(kind, log2Size, code, machineType, shortName, typeName, ...) \
typeName,
- FOREACH_VALUE_TYPE(TYPE_NAME)
+ FOREACH_VALUE_TYPE(KIND_NAME)
-#undef TYPE_NAME
+#undef KIND_NAME
};
return kTypeName[kind()];
}
- private:
- using KindField = base::BitField<Kind, 0, 8>;
- using RefIndexField = base::BitField<uint32_t, 8, 24>;
+ const std::string heap_name() const {
+ switch (heap_type()) {
+ case kHeapFunc:
+ return std::string("func");
+ case kHeapExtern:
+ return std::string("extern");
+ case kHeapEq:
+ return std::string("eq");
+ case kHeapExn:
+ return std::string("exn");
+ default:
+ return std::to_string(static_cast<uint32_t>(heap_type()));
+ }
+ }
uint32_t bit_field_;
};
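
The new ValueType packs its kind (5 bits), heap type (20 bits) and rtt inheritance depth (7 bits) into a single 32-bit bit_field_. An illustrative sketch of that layout written with plain shifts and masks instead of base::BitField; the field widths follow the BitField declarations above, and the example values are arbitrary:

#include <cstdint>

constexpr uint32_t EncodeValueType(uint32_t kind, uint32_t heap_type,
                                   uint32_t depth) {
  return (kind & 0x1F) |                // bits 0..4:  kind
         ((heap_type & 0xFFFFF) << 5) |  // bits 5..24: heap type
         ((depth & 0x7F) << 25);        // bits 25..31: rtt depth
}

constexpr uint32_t DecodeKind(uint32_t bits) { return bits & 0x1F; }
constexpr uint32_t DecodeHeapType(uint32_t bits) {
  return (bits >> 5) & 0xFFFFF;
}
constexpr uint32_t DecodeDepth(uint32_t bits) { return (bits >> 25) & 0x7F; }

int main() {
  constexpr uint32_t bits = EncodeValueType(/*kind=*/9, /*heap_type=*/1040,
                                            /*depth=*/3);
  static_assert(DecodeKind(bits) == 9, "kind round-trips");
  static_assert(DecodeHeapType(bits) == 1040, "heap type round-trips");
  static_assert(DecodeDepth(bits) == 3, "depth round-trips");
}
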
@@ -245,18 +311,20 @@ inline std::ostream& operator<<(std::ostream& oss, ValueType type) {
return oss << type.type_name();
}
-constexpr ValueType kWasmI32 = ValueType(ValueType::kI32);
-constexpr ValueType kWasmI64 = ValueType(ValueType::kI64);
-constexpr ValueType kWasmF32 = ValueType(ValueType::kF32);
-constexpr ValueType kWasmF64 = ValueType(ValueType::kF64);
-constexpr ValueType kWasmAnyRef = ValueType(ValueType::kAnyRef);
-constexpr ValueType kWasmEqRef = ValueType(ValueType::kEqRef);
-constexpr ValueType kWasmExnRef = ValueType(ValueType::kExnRef);
-constexpr ValueType kWasmFuncRef = ValueType(ValueType::kFuncRef);
-constexpr ValueType kWasmNullRef = ValueType(ValueType::kNullRef);
-constexpr ValueType kWasmS128 = ValueType(ValueType::kS128);
-constexpr ValueType kWasmStmt = ValueType(ValueType::kStmt);
-constexpr ValueType kWasmBottom = ValueType(ValueType::kBottom);
+constexpr ValueType kWasmI32 = ValueType::Primitive(ValueType::kI32);
+constexpr ValueType kWasmI64 = ValueType::Primitive(ValueType::kI64);
+constexpr ValueType kWasmF32 = ValueType::Primitive(ValueType::kF32);
+constexpr ValueType kWasmF64 = ValueType::Primitive(ValueType::kF64);
+constexpr ValueType kWasmS128 = ValueType::Primitive(ValueType::kS128);
+constexpr ValueType kWasmI8 = ValueType::Primitive(ValueType::kI8);
+constexpr ValueType kWasmI16 = ValueType::Primitive(ValueType::kI16);
+constexpr ValueType kWasmStmt = ValueType::Primitive(ValueType::kStmt);
+constexpr ValueType kWasmBottom = ValueType::Primitive(ValueType::kBottom);
+// Established wasm shorthands:
+constexpr ValueType kWasmFuncRef = ValueType::Ref(kHeapFunc, kNullable);
+constexpr ValueType kWasmExnRef = ValueType::Ref(kHeapExn, kNullable);
+constexpr ValueType kWasmExternRef = ValueType::Ref(kHeapExtern, kNullable);
+constexpr ValueType kWasmEqRef = ValueType::Ref(kHeapEq, kNullable);
#define FOREACH_WASMVALUE_CTYPES(V) \
V(kI32, int32_t) \
@@ -332,7 +400,7 @@ class LoadType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
FOREACH_LOAD_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
@@ -403,7 +471,7 @@ class StoreType {
};
static constexpr ValueType kValueType[] = {
-#define VALUE_TYPE(type, ...) ValueType(ValueType::k##type),
+#define VALUE_TYPE(type, ...) ValueType::Primitive(ValueType::k##type),
FOREACH_STORE_TYPE(VALUE_TYPE)
#undef VALUE_TYPE
};
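
ValueType::type_name() keeps the historic shorthands ("funcref", "externref", "eqref", "exnref") for generic nullable references and switches to the "(ref null N)" / "(ref N)" syntax for indexed types. A simplified sketch of that naming scheme, using a hypothetical RefName helper and assumed sentinel constants rather than the V8 code:

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>

std::string RefName(uint32_t heap, bool nullable) {
  // Assumed sentinel values for the generic heap types; user-defined type
  // indices are smaller than all of them.
  static const uint32_t kFunc = 1000000, kExtern = 1000001, kEq = 1000002,
                        kExn = 1000003;
  auto heap_name = [&]() -> std::string {
    if (heap == kFunc) return "func";
    if (heap == kExtern) return "extern";
    if (heap == kEq) return "eq";
    if (heap == kExn) return "exn";
    return std::to_string(heap);  // user-defined type index
  };
  bool generic = heap >= kFunc;
  std::ostringstream buf;
  if (nullable && generic) {
    buf << heap_name() << "ref";  // e.g. "funcref"
  } else if (nullable) {
    buf << "(ref null " << heap_name() << ")";  // e.g. "(ref null 3)"
  } else {
    buf << "(ref " << heap_name() << ")";
  }
  return buf.str();
}

int main() {
  std::cout << RefName(1000000, true) << "\n";  // funcref
  std::cout << RefName(3, true) << "\n";        // (ref null 3)
  std::cout << RefName(3, false) << "\n";       // (ref 3)
}
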
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index 5477a18f33d..f79b98e5687 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -83,7 +83,7 @@ base::AddressRegion DisjointAllocationPool::Merge(
auto below = above;
--below;
- // Sanity check:
+ // Consistency check:
DCHECK(above == regions_.end() || below->end() < above->begin());
// Adjacent to {below}: merge and done.
@@ -327,6 +327,12 @@ void WasmCode::Print(const char* name) const {
StdoutStream os;
os << "--- WebAssembly code ---\n";
Disassemble(name, os);
+ if (native_module_->HasDebugInfo()) {
+ if (auto* debug_side_table =
+ native_module_->GetDebugInfo()->GetDebugSideTableIfExists(this)) {
+ debug_side_table->Print(os);
+ }
+ }
os << "--- End code ---\n";
}
@@ -849,13 +855,13 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
code->is_off_heap_trampoline() ? 0 : code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
- reloc_info = OwnedVector<byte>::New(relocation_size);
- memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
+ reloc_info = OwnedVector<byte>::Of(
+ Vector<byte>{code->relocation_start(), relocation_size});
}
Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
code->GetIsolate());
OwnedVector<byte> source_pos =
- OwnedVector<byte>::New(source_pos_table->length());
+ OwnedVector<byte>::NewForOverwrite(source_pos_table->length());
if (source_pos_table->length() > 0) {
source_pos_table->copy_out(0, source_pos.start(),
source_pos_table->length());
@@ -923,7 +929,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
WasmCode::kFunction, // kind
ExecutionTier::kNone, // tier
kNoDebugging}}; // for_debugging
- new_code->MaybePrint(nullptr);
+ new_code->MaybePrint();
new_code->Validate();
return PublishCode(std::move(new_code));
@@ -1347,7 +1353,9 @@ class NativeModuleWireBytesStorage final : public WireBytesStorage {
: wire_bytes_(std::move(wire_bytes)) {}
Vector<const uint8_t> GetCode(WireBytesRef ref) const final {
- return wire_bytes_->as_vector().SubVector(ref.offset(), ref.end_offset());
+ return std::atomic_load(&wire_bytes_)
+ ->as_vector()
+ .SubVector(ref.offset(), ref.end_offset());
}
private:
@@ -1358,7 +1366,7 @@ class NativeModuleWireBytesStorage final : public WireBytesStorage {
void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
auto shared_wire_bytes =
std::make_shared<OwnedVector<const uint8_t>>(std::move(wire_bytes));
- wire_bytes_ = shared_wire_bytes;
+ std::atomic_store(&wire_bytes_, shared_wire_bytes);
if (!shared_wire_bytes->empty()) {
compilation_state_->SetWireBytesStorage(
std::make_shared<NativeModuleWireBytesStorage>(
@@ -1851,7 +1859,7 @@ bool NativeModule::IsTieredDown() {
return tiering_state_ == kTieredDown;
}
-void NativeModule::TriggerRecompilation() {
+void NativeModule::RecompileForTiering() {
// Read the tiering state under the lock, then trigger recompilation after
// releasing the lock. If the tiering state was changed when the triggered
// compilation units finish, code installation will handle that correctly.
@@ -1863,24 +1871,51 @@ void NativeModule::TriggerRecompilation() {
RecompileNativeModule(this, current_state);
}
+std::vector<int> NativeModule::FindFunctionsToRecompile(
+ TieringState new_tiering_state) {
+ base::MutexGuard guard(&allocation_mutex_);
+ std::vector<int> function_indexes;
+ int imported = module()->num_imported_functions;
+ int declared = module()->num_declared_functions;
+ for (int slot_index = 0; slot_index < declared; ++slot_index) {
+ int function_index = imported + slot_index;
+ WasmCode* code = code_table_[slot_index];
+ bool code_is_good = new_tiering_state == kTieredDown
+ ? code && code->for_debugging()
+ : code && code->tier() == ExecutionTier::kTurbofan;
+ if (!code_is_good) function_indexes.push_back(function_index);
+ }
+ return function_indexes;
+}
+
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
// Free the code space.
code_allocator_.FreeCode(codes);
- base::MutexGuard guard(&allocation_mutex_);
- // Remove debug side tables for all removed code objects.
- if (debug_info_) debug_info_->RemoveDebugSideTables(codes);
- // Free the {WasmCode} objects. This will also unregister trap handler data.
- for (WasmCode* code : codes) {
- DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
- owned_code_.erase(code->instruction_start());
+ DebugInfo* debug_info = nullptr;
+ {
+ base::MutexGuard guard(&allocation_mutex_);
+ debug_info = debug_info_.get();
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
+ }
}
+ // Remove debug side tables for all removed code objects, after releasing our
+ // lock. This is to avoid lock order inversion.
+ if (debug_info) debug_info->RemoveDebugSideTables(codes);
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
return code_allocator_.GetNumCodeSpaces();
}
+bool NativeModule::HasDebugInfo() const {
+ base::MutexGuard guard(&allocation_mutex_);
+ return debug_info_ != nullptr;
+}
+
DebugInfo* NativeModule::GetDebugInfo() {
base::MutexGuard guard(&allocation_mutex_);
if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
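
SetWireBytes() and the wire_bytes() accessor above now publish and read the shared_ptr through std::atomic_load/std::atomic_store, so the wire bytes can be installed while other threads are already reading them. A self-contained sketch of that publication pattern (BytesHolder is a stand-in type, not V8's NativeModule):

    #include <cstdint>
    #include <memory>
    #include <vector>

    class BytesHolder {
     public:
      void SetBytes(std::vector<uint8_t> bytes) {
        auto shared =
            std::make_shared<const std::vector<uint8_t>>(std::move(bytes));
        // Publish atomically: concurrent readers see either the old or the
        // new vector, never a torn pointer.
        std::atomic_store(&bytes_, std::move(shared));
      }
      bool HasBytes() const {
        auto bytes = std::atomic_load(&bytes_);
        return bytes && !bytes->empty();
      }
     private:
      std::shared_ptr<const std::vector<uint8_t>> bytes_;
    };
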
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 443f6f36059..d76adccad76 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -71,8 +71,9 @@ struct WasmModule;
V(WasmStackOverflow) \
V(WasmThrow) \
V(WasmRethrow) \
+ V(WasmTraceEnter) \
+ V(WasmTraceExit) \
V(WasmTraceMemory) \
- V(AllocateHeapNumber) \
V(ArgumentsAdaptorTrampoline) \
V(BigIntToI32Pair) \
V(BigIntToI64) \
@@ -566,7 +567,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
UseTrapHandler use_trap_handler() const { return use_trap_handler_; }
void set_lazy_compile_frozen(bool frozen) { lazy_compile_frozen_ = frozen; }
bool lazy_compile_frozen() const { return lazy_compile_frozen_; }
- Vector<const uint8_t> wire_bytes() const { return wire_bytes_->as_vector(); }
+ Vector<const uint8_t> wire_bytes() const {
+ return std::atomic_load(&wire_bytes_)->as_vector();
+ }
const WasmModule* module() const { return module_.get(); }
std::shared_ptr<const WasmModule> shared_module() const { return module_; }
size_t committed_code_space() const {
@@ -574,6 +577,10 @@ class V8_EXPORT_PRIVATE NativeModule final {
}
WasmEngine* engine() const { return engine_; }
+ bool HasWireBytes() const {
+ auto wire_bytes = std::atomic_load(&wire_bytes_);
+ return wire_bytes && !wire_bytes->empty();
+ }
void SetWireBytes(OwnedVector<const uint8_t> wire_bytes);
WasmCode* Lookup(Address) const;
@@ -600,18 +607,23 @@ class V8_EXPORT_PRIVATE NativeModule final {
Vector<WasmCompilationResult>);
// Set a new tiering state, but don't trigger any recompilation yet; use
- // {TriggerRecompilation} for that. The two steps are split because In some
+ // {RecompileForTiering} for that. The two steps are split because in some
// scenarios we need to drop locks before triggering recompilation.
void SetTieringState(TieringState);
// Check whether this modules is tiered down for debugging.
bool IsTieredDown();
- // Trigger a full recompilation of this module, in the tier set previously via
- // {SetTieringState}. When tiering down, the calling thread contributes to
- // compilation and only returns once recompilation is done. Tiering up happens
- // concurrently, so this method might return before it is complete.
- void TriggerRecompilation();
+ // Fully recompile this module in the tier set previously via
+ // {SetTieringState}. The calling thread contributes to compilation and only
+ // returns once recompilation is done.
+ void RecompileForTiering();
+
+ // Find all functions that need to be recompiled for a new tier. Note that
+ // compilation jobs might run concurrently, so this method only considers the
+ // compilation state of this native module at the time of the call.
+ // Returns a vector of function indexes to recompile.
+ std::vector<int> FindFunctionsToRecompile(TieringState);
// Free a set of functions of this module. Uncommits whole pages if possible.
// The given vector must be ordered by the instruction start address, and all
@@ -623,6 +635,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Retrieve the number of separately reserved code spaces for this module.
size_t GetNumberOfCodeSpacesForTesting() const;
+ // Check whether there is DebugInfo for this NativeModule.
+ bool HasDebugInfo() const;
+
// Get or create the debug info for this NativeModule.
DebugInfo* GetDebugInfo();
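
The header comment above explains why SetTieringState and RecompileForTiering are separate steps: flipping the state is cheap and can happen under a lock, while recompilation has to run after the lock is released. A self-contained sketch of that split with stand-in types (Module, TierDownAll, and the mutex are illustrative, not V8 API):

    #include <mutex>
    #include <vector>

    enum TieringState { kTieredUp, kTieredDown };

    struct Module {
      TieringState state = kTieredUp;
      void SetTieringState(TieringState s) { state = s; }  // cheap, lock held
      void RecompileForTiering() { /* long-running compilation */ }
    };

    void TierDownAll(std::vector<Module*>& modules, std::mutex& mutex) {
      std::vector<Module*> to_recompile;
      {
        std::lock_guard<std::mutex> lock(mutex);
        for (Module* m : modules) {
          m->SetTieringState(kTieredDown);
          to_recompile.push_back(m);
        }
      }
      // Recompilation may take locks of its own, so trigger it only after
      // the guard above has been released, avoiding lock-order inversion.
      for (Module* m : to_recompile) m->RecompileForTiering();
    }
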
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index b860ae692ca..9304f116fcc 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -18,22 +18,28 @@ namespace wasm {
constexpr uint32_t kWasmMagic = 0x6d736100;
constexpr uint32_t kWasmVersion = 0x01;
-// Binary encoding of local types.
+// Binary encoding of value and heap types.
enum ValueTypeCode : uint8_t {
+ // Current wasm types
kLocalVoid = 0x40,
kLocalI32 = 0x7f,
kLocalI64 = 0x7e,
kLocalF32 = 0x7d,
kLocalF64 = 0x7c,
+ // Simd proposal
kLocalS128 = 0x7b,
+ // reftypes, typed-funcref, and GC proposals
+ kLocalI8 = 0x7a,
+ kLocalI16 = 0x79,
kLocalFuncRef = 0x70,
- kLocalAnyRef = 0x6f,
- kLocalNullRef = 0x6e,
- kLocalRef = 0x6d, // GC proposal
- kLocalOptRef = 0x6c, // GC proposal
- kLocalEqRef = 0x6b, // GC proposal
- kLocalI31Ref = 0x6a, // GC proposal
- kLocalRttRef = 0x69, // GC proposal
+ kLocalExternRef = 0x6f,
+ // kLocalAny = 0x6e, // TODO(7748): Implement
+ kLocalEqRef = 0x6d,
+ kLocalOptRef = 0x6c,
+ kLocalRef = 0x6b,
+ // kLocalI31 = 0x6a, // TODO(7748): Implement
+ kLocalRtt = 0x69,
+ // Exception handling proposal
kLocalExnRef = 0x68,
};
// Binary encoding of other types.
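
For reference, the one-byte codes listed above decode as follows. This helper is only an illustration; the real decoding lives in the module decoder and also consumes the heap-type immediates that follow ref/optref/rtt:

    #include <cstdint>

    const char* ValueTypeCodeName(uint8_t code) {
      switch (code) {
        case 0x40: return "void";
        case 0x7f: return "i32";
        case 0x7e: return "i64";
        case 0x7d: return "f32";
        case 0x7c: return "f64";
        case 0x7b: return "s128";
        case 0x7a: return "i8";
        case 0x79: return "i16";
        case 0x70: return "funcref";
        case 0x6f: return "externref";
        case 0x6d: return "eqref";
        case 0x6c: return "optref";  // followed by a heap type immediate
        case 0x6b: return "ref";     // followed by a heap type immediate
        case 0x69: return "rtt";
        case 0x68: return "exnref";
        default:   return "<unknown>";
      }
    }
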
diff --git a/chromium/v8/src/wasm/wasm-debug-evaluate.cc b/chromium/v8/src/wasm/wasm-debug-evaluate.cc
index 019ae5f73ec..a8c4cf2c40d 100644
--- a/chromium/v8/src/wasm/wasm-debug-evaluate.cc
+++ b/chromium/v8/src/wasm/wasm-debug-evaluate.cc
@@ -9,6 +9,7 @@
#include "src/api/api-inl.h"
#include "src/codegen/machine-type.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/execution/frames-inl.h"
#include "src/wasm/value-type.h"
#include "src/wasm/wasm-arguments.h"
@@ -33,15 +34,15 @@ static bool CheckSignature(ValueType return_type,
const FunctionSig* sig, ErrorThrower* thrower) {
if (sig->return_count() != 1 && return_type != kWasmBottom) {
thrower->CompileError("Invalid return type. Got none, expected %s",
- return_type.type_name());
+ return_type.type_name().c_str());
return false;
}
if (sig->return_count() == 1) {
if (sig->GetReturn(0) != return_type) {
thrower->CompileError("Invalid return type. Got %s, expected %s",
- sig->GetReturn(0).type_name(),
- return_type.type_name());
+ sig->GetReturn(0).type_name().c_str(),
+ return_type.type_name().c_str());
return false;
}
}
@@ -56,7 +57,8 @@ static bool CheckSignature(ValueType return_type,
if (sig->GetParam(p) != argument_type) {
thrower->CompileError(
"Invalid argument type for argument %zu. Got %s, expected %s", p,
- sig->GetParam(p).type_name(), argument_type.type_name());
+ sig->GetParam(p).type_name().c_str(),
+ argument_type.type_name().c_str());
return false;
}
++p;
@@ -202,8 +204,8 @@ class DebugEvaluatorProxy {
DCHECK(frame_->is_wasm());
wasm::DebugInfo* debug_info =
WasmFrame::cast(frame_)->native_module()->GetDebugInfo();
- return debug_info->GetLocalValue(local, isolate_, frame_->pc(),
- frame_->fp(), frame_->callee_fp());
+ return debug_info->GetLocalValue(local, frame_->pc(), frame_->fp(),
+ frame_->callee_fp());
}
uint32_t GetArgAsUInt32(const v8::FunctionCallbackInfo<v8::Value>& args,
@@ -350,10 +352,10 @@ Maybe<std::string> DebugEvaluateImpl(
Handle<WasmExportedFunction> entry_point =
Handle<WasmExportedFunction>::cast(entry_point_obj);
- Handle<WasmDebugInfo> debug_info =
- WasmInstanceObject::GetOrCreateDebugInfo(evaluator_instance);
+ // TODO(wasm): Cache this code.
Handle<Code> wasm_entry =
- WasmDebugInfo::GetCWasmEntry(debug_info, entry_point->sig());
+ compiler::CompileCWasmEntry(isolate, entry_point->sig());
+
CWasmArgumentsPacker packer(4 /* uint32_t return value, no parameters. */);
Execution::CallWasm(isolate, wasm_entry, entry_point->GetWasmCallTarget(),
evaluator_instance, packer.argv());
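
The .c_str() additions above follow from type_name() now returning std::string: a std::string cannot be passed through a printf-style vararg, so the NUL-terminated pointer must be extracted explicitly. A minimal sketch (Report is a hypothetical helper, not part of this patch):

    #include <cstdio>
    #include <string>

    void Report(const std::string& got, const std::string& expected) {
      // Passing 'got' directly to a "%s" format would be undefined behavior;
      // c_str() yields the char* the format expects.
      std::printf("Invalid return type. Got %s, expected %s\n", got.c_str(),
                  expected.c_str());
    }
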
diff --git a/chromium/v8/src/wasm/wasm-debug-evaluate.h b/chromium/v8/src/wasm/wasm-debug-evaluate.h
index 31eba51a3cc..f4e3aef1754 100644
--- a/chromium/v8/src/wasm/wasm-debug-evaluate.h
+++ b/chromium/v8/src/wasm/wasm-debug-evaluate.h
@@ -7,7 +7,6 @@
#include "src/base/macros.h"
#include "src/handles/maybe-handles.h"
-#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-objects.h"
namespace v8 {
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index a8fd6505f0e..61f3492af96 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -4,6 +4,7 @@
#include "src/wasm/wasm-debug.h"
+#include <iomanip>
#include <unordered_map>
#include "src/base/optional.h"
@@ -15,15 +16,14 @@
#include "src/execution/frames-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
-#include "src/utils/identity-map.h"
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
-#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
+#include "src/wasm/wasm-opcodes-inl.h"
#include "src/wasm/wasm-value.h"
#include "src/zone/accounting-allocator.h"
@@ -49,29 +49,102 @@ Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
: isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
}
+MaybeHandle<JSObject> CreateFunctionTablesObject(
+ Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ auto tables = handle(instance->tables(), isolate);
+ if (tables->length() == 0) return MaybeHandle<JSObject>();
+
+ const char* table_label = "table%d";
+ Handle<JSObject> tables_obj = isolate->factory()->NewJSObjectWithNullProto();
+ for (int table_index = 0; table_index < tables->length(); ++table_index) {
+ auto func_table =
+ handle(WasmTableObject::cast(tables->get(table_index)), isolate);
+ if (func_table->type().heap_type() != kHeapFunc) continue;
+
+ Handle<String> table_name;
+ if (!WasmInstanceObject::GetTableNameOrNull(isolate, instance, table_index)
+ .ToHandle(&table_name)) {
+ table_name =
+ PrintFToOneByteString<true>(isolate, table_label, table_index);
+ }
+
+ Handle<JSObject> func_table_obj =
+ isolate->factory()->NewJSObjectWithNullProto();
+ JSObject::AddProperty(isolate, tables_obj, table_name, func_table_obj,
+ NONE);
+ for (int i = 0; i < func_table->current_length(); ++i) {
+ Handle<Object> func = WasmTableObject::Get(isolate, func_table, i);
+ DCHECK(!WasmCapiFunction::IsWasmCapiFunction(*func));
+ if (func->IsNull(isolate)) continue;
+
+ Handle<String> func_name;
+ Handle<JSObject> func_obj =
+ isolate->factory()->NewJSObjectWithNullProto();
+
+ if (WasmExportedFunction::IsWasmExportedFunction(*func)) {
+ auto target_func = Handle<WasmExportedFunction>::cast(func);
+ auto target_instance = handle(target_func->instance(), isolate);
+ auto module = handle(target_instance->module_object(), isolate);
+ func_name = WasmModuleObject::GetFunctionName(
+ isolate, module, target_func->function_index());
+ } else if (WasmJSFunction::IsWasmJSFunction(*func)) {
+ auto target_func = Handle<JSFunction>::cast(func);
+ func_name = JSFunction::GetName(target_func);
+ if (func_name->length() == 0) {
+ func_name = isolate->factory()->InternalizeUtf8String("anonymous");
+ }
+ }
+ JSObject::AddProperty(isolate, func_obj, func_name, func, NONE);
+ JSObject::AddDataElement(func_table_obj, i, func_obj, NONE);
+ }
+ }
+ return tables_obj;
+}
+
Handle<Object> WasmValueToValueObject(Isolate* isolate, WasmValue value) {
+ Handle<ByteArray> bytes;
switch (value.type().kind()) {
- case ValueType::kI32:
- if (Smi::IsValid(value.to<int32_t>()))
- return handle(Smi::FromInt(value.to<int32_t>()), isolate);
- return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
+ case ValueType::kI32: {
+ int32_t val = value.to_i32();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
case ValueType::kI64: {
- int64_t i64 = value.to<int64_t>();
- int32_t i32 = static_cast<int32_t>(i64);
- if (i32 == i64 && Smi::IsValid(i32))
- return handle(Smi::FromIntptr(i32), isolate);
- return PrintFToOneByteString<false>(isolate, "%" PRId64, i64);
+ int64_t val = value.to_i64();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
+ case ValueType::kF32: {
+ float val = value.to_f32();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
+ }
+ case ValueType::kF64: {
+ double val = value.to_f64();
+ bytes = isolate->factory()->NewByteArray(sizeof(val));
+ memcpy(bytes->GetDataStartAddress(), &val, sizeof(val));
+ break;
}
- case ValueType::kF32:
- return isolate->factory()->NewNumber(value.to<float>());
- case ValueType::kF64:
- return isolate->factory()->NewNumber(value.to<double>());
- case ValueType::kAnyRef:
- return value.to_anyref();
- default:
+ case ValueType::kOptRef: {
+ if (value.type().heap_type() == kHeapExtern) {
+ return isolate->factory()->NewWasmValue(
+ static_cast<int32_t>(kHeapExtern), value.to_externref());
+ } else {
+ // TODO(7748): Implement.
+ UNIMPLEMENTED();
+ }
+ }
+ default: {
+ // TODO(7748): Implement.
UNIMPLEMENTED();
- return isolate->factory()->undefined_value();
+ }
}
+ return isolate->factory()->NewWasmValue(
+ static_cast<int32_t>(value.type().kind()), bytes);
}
MaybeHandle<String> GetLocalNameString(Isolate* isolate,
@@ -87,176 +160,10 @@ MaybeHandle<String> GetLocalNameString(Isolate* isolate,
return isolate->factory()->NewStringFromUtf8(name);
}
-class InterpreterHandle {
- Isolate* isolate_;
- const WasmModule* module_;
- WasmInterpreter interpreter_;
- std::unordered_map<Address, uint32_t> activations_;
-
- uint32_t StartActivation(Address frame_pointer) {
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- uint32_t activation_id = thread->StartActivation();
- DCHECK_EQ(0, activations_.count(frame_pointer));
- activations_.insert(std::make_pair(frame_pointer, activation_id));
- return activation_id;
- }
-
- void FinishActivation(Address frame_pointer, uint32_t activation_id) {
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- thread->FinishActivation(activation_id);
- DCHECK_EQ(1, activations_.count(frame_pointer));
- activations_.erase(frame_pointer);
- }
-
- bool HasActivation(Address frame_pointer) {
- return activations_.count(frame_pointer);
- }
-
- std::pair<uint32_t, uint32_t> GetActivationFrameRange(
- WasmInterpreter::Thread* thread, Address frame_pointer) {
- DCHECK_EQ(1, activations_.count(frame_pointer));
- uint32_t activation_id = activations_.find(frame_pointer)->second;
- uint32_t num_activations = static_cast<uint32_t>(activations_.size() - 1);
- uint32_t frame_base = thread->ActivationFrameBase(activation_id);
- uint32_t frame_limit = activation_id == num_activations
- ? thread->GetFrameCount()
- : thread->ActivationFrameBase(activation_id + 1);
- DCHECK_LE(frame_base, frame_limit);
- DCHECK_LE(frame_limit, thread->GetFrameCount());
- return {frame_base, frame_limit};
- }
-
- static ModuleWireBytes GetBytes(WasmDebugInfo debug_info) {
- // Return raw pointer into heap. The WasmInterpreter will make its own copy
- // of this data anyway, and there is no heap allocation in-between.
- NativeModule* native_module =
- debug_info.wasm_instance().module_object().native_module();
- return ModuleWireBytes{native_module->wire_bytes()};
- }
-
- public:
- InterpreterHandle(Isolate* isolate, Handle<WasmDebugInfo> debug_info)
- : isolate_(isolate),
- module_(debug_info->wasm_instance().module_object().module()),
- interpreter_(isolate, module_, GetBytes(*debug_info),
- handle(debug_info->wasm_instance(), isolate)) {}
-
- WasmInterpreter* interpreter() { return &interpreter_; }
- const WasmModule* module() const { return module_; }
-
- // Returns true if exited regularly, false if a trap/exception occurred and
- // was not handled inside this activation. In the latter case, a pending
- // exception will have been set on the isolate.
- bool Execute(Handle<WasmInstanceObject> instance_object,
- Address frame_pointer, uint32_t func_index,
- Vector<WasmValue> argument_values,
- Vector<WasmValue> return_values) {
- DCHECK_GE(module()->functions.size(), func_index);
- const FunctionSig* sig = module()->functions[func_index].sig;
- DCHECK_EQ(sig->parameter_count(), argument_values.size());
- DCHECK_EQ(sig->return_count(), return_values.size());
-
- uint32_t activation_id = StartActivation(frame_pointer);
-
- WasmCodeRefScope code_ref_scope;
- WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
- thread->InitFrame(&module()->functions[func_index],
- argument_values.begin());
- bool finished = false;
- while (!finished) {
- // TODO(clemensb): Add occasional StackChecks.
- WasmInterpreter::State state = thread->Run();
- switch (state) {
- case WasmInterpreter::State::PAUSED:
- UNREACHABLE();
- case WasmInterpreter::State::FINISHED:
- // Perfect, just break the switch and exit the loop.
- finished = true;
- break;
- case WasmInterpreter::State::TRAPPED: {
- MessageTemplate message_id =
- WasmOpcodes::TrapReasonToMessageId(thread->GetTrapReason());
- Handle<JSObject> exception =
- isolate_->factory()->NewWasmRuntimeError(message_id);
- JSObject::AddProperty(isolate_, exception,
- isolate_->factory()->wasm_uncatchable_symbol(),
- isolate_->factory()->true_value(), NONE);
- auto result = thread->RaiseException(isolate_, exception);
- if (result == WasmInterpreter::Thread::HANDLED) break;
- // If no local handler was found, we fall-thru to {STOPPED}.
- DCHECK_EQ(WasmInterpreter::State::STOPPED, thread->state());
- V8_FALLTHROUGH;
- }
- case WasmInterpreter::State::STOPPED:
- // An exception happened, and the current activation was unwound
- // without hitting a local exception handler. All that remains to be
- // done is finish the activation and let the exception propagate.
- DCHECK_EQ(thread->ActivationFrameBase(activation_id),
- thread->GetFrameCount());
- DCHECK(isolate_->has_pending_exception());
- FinishActivation(frame_pointer, activation_id);
- return false;
- // RUNNING should never occur here.
- case WasmInterpreter::State::RUNNING:
- default:
- UNREACHABLE();
- }
- }
-
- // Copy back the return value.
-#ifdef DEBUG
- const int max_count = WasmFeatures::FromIsolate(isolate_).has_mv()
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
-#endif
- DCHECK_GE(max_count, sig->return_count());
- for (unsigned i = 0; i < sig->return_count(); ++i) {
- return_values[i] = thread->GetReturnValue(i);
- }
-
- FinishActivation(frame_pointer, activation_id);
-
- return true;
- }
-
- std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
- Address frame_pointer) {
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
-
- std::pair<uint32_t, uint32_t> frame_range =
- GetActivationFrameRange(thread, frame_pointer);
-
- std::vector<std::pair<uint32_t, int>> stack;
- stack.reserve(frame_range.second - frame_range.first);
- for (uint32_t fp = frame_range.first; fp < frame_range.second; ++fp) {
- auto frame = thread->GetFrame(fp);
- stack.emplace_back(frame->function()->func_index, frame->pc());
- }
- return stack;
- }
-
- int NumberOfActiveFrames(Address frame_pointer) {
- if (!HasActivation(frame_pointer)) return 0;
-
- DCHECK_EQ(1, interpreter()->GetThreadCount());
- WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
-
- std::pair<uint32_t, uint32_t> frame_range =
- GetActivationFrameRange(thread, frame_pointer);
-
- return frame_range.second - frame_range.first;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InterpreterHandle);
-};
-
// Generate a sorted and deduplicated list of byte offsets for this function's
// current positions on the stack.
std::vector<int> StackFramePositions(int func_index, Isolate* isolate) {
std::vector<int> byte_offsets;
- WasmCodeRefScope code_ref_scope;
for (StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
if (!it.is_wasm()) continue;
WasmFrame* frame = WasmFrame::cast(it.frame());
@@ -304,11 +211,43 @@ Address FindNewPC(WasmCode* wasm_code, int byte_offset,
} // namespace
+void DebugSideTable::Print(std::ostream& os) const {
+ os << "Debug side table (" << num_locals_ << " locals, " << entries_.size()
+ << " entries):\n";
+ for (auto& entry : entries_) entry.Print(os);
+ os << "\n";
+}
+
+void DebugSideTable::Entry::Print(std::ostream& os) const {
+ os << std::setw(6) << std::hex << pc_offset_ << std::dec << " [";
+ for (auto& value : values_) {
+ os << " " << value.type.type_name() << ":";
+ switch (value.kind) {
+ case kConstant:
+ os << "const#" << value.i32_const;
+ break;
+ case kRegister:
+ os << "reg#" << value.reg_code;
+ break;
+ case kStack:
+ os << "stack#" << value.stack_offset;
+ break;
+ }
+ }
+ os << " ]\n";
+}
+
Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
Isolate* isolate = instance->GetIsolate();
Handle<JSObject> module_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
+
+ Handle<String> instance_name =
+ isolate->factory()->InternalizeString(StaticCharVector("instance"));
+ JSObject::AddProperty(isolate, module_scope_object, instance_name, instance,
+ NONE);
+
if (instance->has_memory_object()) {
Handle<String> name;
// TODO(duongn): extend the logic when multiple memories are supported.
@@ -327,6 +266,14 @@ Handle<JSObject> GetModuleScopeObject(Handle<WasmInstanceObject> instance) {
NONE);
}
+ Handle<JSObject> function_tables_obj;
+ if (CreateFunctionTablesObject(instance).ToHandle(&function_tables_obj)) {
+ Handle<String> tables_name = isolate->factory()->InternalizeString(
+ StaticCharVector("function tables"));
+ JSObject::AddProperty(isolate, module_scope_object, tables_name,
+ function_tables_obj, NONE);
+ }
+
auto& globals = instance->module()->globals;
if (globals.size() > 0) {
Handle<JSObject> globals_obj =
@@ -357,29 +304,29 @@ class DebugInfoImpl {
explicit DebugInfoImpl(NativeModule* native_module)
: native_module_(native_module) {}
- int GetNumLocals(Isolate* isolate, Address pc) {
- FrameInspectionScope scope(this, isolate, pc);
+ int GetNumLocals(Address pc) {
+ FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
return scope.debug_side_table->num_locals();
}
- WasmValue GetLocalValue(int local, Isolate* isolate, Address pc, Address fp,
+ WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
return GetValue(scope.debug_side_table_entry, local, fp, debug_break_fp);
}
- int GetStackDepth(Isolate* isolate, Address pc) {
- FrameInspectionScope scope(this, isolate, pc);
+ int GetStackDepth(Address pc) {
+ FrameInspectionScope scope(this, pc);
if (!scope.is_inspectable()) return 0;
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
int value_count = scope.debug_side_table_entry->num_values();
return value_count - num_locals;
}
- WasmValue GetStackValue(int index, Isolate* isolate, Address pc, Address fp,
+ WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
int value_count = scope.debug_side_table_entry->num_values();
if (num_locals + index >= value_count) return {};
@@ -389,7 +336,7 @@ class DebugInfoImpl {
Handle<JSObject> GetLocalScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
Handle<JSObject> local_scope_object =
isolate->factory()->NewJSObjectWithNullProto();
@@ -401,40 +348,32 @@ class DebugInfoImpl {
// Fill parameters and locals.
int num_locals = static_cast<int>(scope.debug_side_table->num_locals());
DCHECK_LE(static_cast<int>(function->sig->parameter_count()), num_locals);
- if (num_locals > 0) {
- Handle<JSObject> locals_obj =
- isolate->factory()->NewJSObjectWithNullProto();
- Handle<String> locals_name =
- isolate->factory()->InternalizeString(StaticCharVector("locals"));
- JSObject::AddProperty(isolate, local_scope_object, locals_name,
- locals_obj, NONE);
- for (int i = 0; i < num_locals; ++i) {
- Handle<Name> name;
- if (!GetLocalNameString(isolate, native_module_, function->func_index,
- i)
- .ToHandle(&name)) {
- name = PrintFToOneByteString<true>(isolate, "var%d", i);
- }
- WasmValue value =
- GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
- Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
- // {name} can be a string representation of an element index.
- LookupIterator::Key lookup_key{isolate, name};
- LookupIterator it(isolate, locals_obj, lookup_key, locals_obj,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound()) continue;
- Object::AddDataProperty(&it, value_obj, NONE,
- Just(ShouldThrow::kThrowOnError),
- StoreOrigin::kNamed)
- .Check();
+ for (int i = 0; i < num_locals; ++i) {
+ Handle<Name> name;
+ if (!GetLocalNameString(isolate, native_module_, function->func_index, i)
+ .ToHandle(&name)) {
+ name = PrintFToOneByteString<true>(isolate, "var%d", i);
}
+ WasmValue value =
+ GetValue(scope.debug_side_table_entry, i, fp, debug_break_fp);
+ Handle<Object> value_obj = WasmValueToValueObject(isolate, value);
+ // {name} can be a string representation of an element index.
+ LookupIterator::Key lookup_key{isolate, name};
+ LookupIterator it(isolate, local_scope_object, lookup_key,
+ local_scope_object,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (it.IsFound()) continue;
+ Object::AddDataProperty(&it, value_obj, NONE,
+ Just(ShouldThrow::kThrowOnError),
+ StoreOrigin::kNamed)
+ .Check();
}
return local_scope_object;
}
Handle<JSObject> GetStackScopeObject(Isolate* isolate, Address pc, Address fp,
Address debug_break_fp) {
- FrameInspectionScope scope(this, isolate, pc);
+ FrameInspectionScope scope(this, pc);
Handle<JSObject> stack_scope_obj =
isolate->factory()->NewJSObjectWithNullProto();
@@ -468,10 +407,7 @@ class DebugInfoImpl {
WasmCode* RecompileLiftoffWithBreakpoints(
int func_index, Vector<int> offsets, Vector<int> extra_source_positions) {
- // During compilation, we cannot hold the lock, since compilation takes the
- // {NativeModule} lock, which could lead to deadlocks.
- mutex_.AssertUnheld();
-
+ DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Recompile the function with Liftoff, setting the new breakpoints.
// Not thread-safe. The caller is responsible for locking {mutex_}.
CompilationEnv env = native_module_->CreateCompilationEnv();
@@ -484,9 +420,11 @@ class DebugInfoImpl {
ForDebugging for_debugging =
offsets.size() == 1 && offsets[0] == 0 ? kForStepping : kForDebugging;
+ Counters* counters = nullptr;
+ WasmFeatures unused_detected;
WasmCompilationResult result = ExecuteLiftoffCompilation(
native_module_->engine()->allocator(), &env, body, func_index,
- for_debugging, nullptr, nullptr, offsets, &debug_sidetable,
+ for_debugging, counters, &unused_detected, offsets, &debug_sidetable,
extra_source_positions);
// Liftoff compilation failure is a FATAL error. We rely on complete Liftoff
// support for debugging.
@@ -497,62 +435,99 @@ class DebugInfoImpl {
native_module_->AddCompiledCode(std::move(result)));
DCHECK(new_code->is_inspectable());
- bool added =
- debug_side_tables_.emplace(new_code, std::move(debug_sidetable)).second;
- DCHECK(added);
- USE(added);
+ DCHECK_EQ(0, debug_side_tables_.count(new_code));
+ debug_side_tables_.emplace(new_code, std::move(debug_sidetable));
return new_code;
}
- void SetBreakpoint(int func_index, int offset, Isolate* current_isolate) {
- std::vector<int> breakpoints_copy;
- {
- // Hold the mutex while modifying the set of breakpoints, but release it
- // before compiling the new code (see comment in
- // {RecompileLiftoffWithBreakpoints}). This needs to be revisited once we
- // support setting different breakpoints in different isolates
- // (https://crbug.com/v8/10351).
- base::MutexGuard guard(&mutex_);
+ void SetBreakpoint(int func_index, int offset, Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
- // offset == 0 indicates flooding and should not happen here.
- DCHECK_NE(0, offset);
+ // Generate additional source positions for current stack frame positions.
+ // These source positions are used to find return addresses in the new code.
+ std::vector<int> stack_frame_positions =
+ StackFramePositions(func_index, isolate);
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point != breakpoints.end() && *insertion_point == offset) {
- // The breakpoint is already set.
- return;
- }
- breakpoints.insert(insertion_point, offset);
- breakpoints_copy = breakpoints;
+ // Hold the mutex while modifying breakpoints, to ensure consistency when
+ // multiple isolates set/remove breakpoints at the same time.
+ base::MutexGuard guard(&mutex_);
+
+ // offset == 0 indicates flooding and should not happen here.
+ DCHECK_NE(0, offset);
+
+ // Get the set of previously set breakpoints, to check later whether a new
+ // breakpoint was actually added.
+ std::vector<int> all_breakpoints = FindAllBreakpoints(func_index);
+
+ auto& isolate_data = per_isolate_data_[isolate];
+ std::vector<int>& breakpoints =
+ isolate_data.breakpoints_per_function[func_index];
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point != breakpoints.end() && *insertion_point == offset) {
+ // The breakpoint is already set for this isolate.
+ return;
}
+ breakpoints.insert(insertion_point, offset);
+
+ DCHECK(std::is_sorted(all_breakpoints.begin(), all_breakpoints.end()));
+ // Find the insertion position within {all_breakpoints}.
+ insertion_point = std::lower_bound(all_breakpoints.begin(),
+ all_breakpoints.end(), offset);
+ bool breakpoint_exists =
+ insertion_point != all_breakpoints.end() && *insertion_point == offset;
+ // If the breakpoint was already set before *and* we don't need any special
+ // positions for OSR, then we can just reuse the old code. Otherwise,
+ // recompile it. In any case, rewrite this isolate's stack to make sure that
+ // it uses up-to-date code containing the breakpoint.
+ WasmCode* new_code;
+ if (breakpoint_exists && stack_frame_positions.empty()) {
+ new_code = native_module_->GetCode(func_index);
+ } else {
+ // Add the new offset to the set of all breakpoints, then recompile.
+ if (!breakpoint_exists) all_breakpoints.insert(insertion_point, offset);
+ new_code =
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(all_breakpoints),
+ VectorOf(stack_frame_positions));
+ }
+ UpdateReturnAddresses(isolate, new_code, isolate_data.stepping_frame);
+ }
- UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
+ std::vector<int> FindAllBreakpoints(int func_index) {
+ DCHECK(!mutex_.TryLock()); // Mutex must be held externally.
+ std::set<int> breakpoints;
+ for (auto& data : per_isolate_data_) {
+ auto it = data.second.breakpoints_per_function.find(func_index);
+ if (it == data.second.breakpoints_per_function.end()) continue;
+ for (int offset : it->second) breakpoints.insert(offset);
+ }
+ return {breakpoints.begin(), breakpoints.end()};
}
void UpdateBreakpoints(int func_index, Vector<int> breakpoints,
- Isolate* current_isolate) {
+ Isolate* isolate, StackFrameId stepping_frame) {
+ DCHECK(!mutex_.TryLock()); // Mutex is held externally.
// Generate additional source positions for current stack frame positions.
// These source positions are used to find return addresses in the new code.
std::vector<int> stack_frame_positions =
- StackFramePositions(func_index, current_isolate);
+ StackFramePositions(func_index, isolate);
- WasmCodeRefScope wasm_code_ref_scope;
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
func_index, breakpoints, VectorOf(stack_frame_positions));
- UpdateReturnAddresses(current_isolate, new_code);
+ UpdateReturnAddresses(isolate, new_code, stepping_frame);
}
- void FloodWithBreakpoints(WasmFrame* frame, Isolate* current_isolate,
- ReturnLocation return_location) {
+ void FloodWithBreakpoints(WasmFrame* frame, ReturnLocation return_location) {
// 0 is an invalid offset used to indicate flooding.
int offset = 0;
WasmCodeRefScope wasm_code_ref_scope;
DCHECK(frame->wasm_code()->is_liftoff());
// Generate an additional source position for the current byte offset.
int byte_offset = frame->byte_offset();
+ base::MutexGuard guard(&mutex_);
WasmCode* new_code = RecompileLiftoffWithBreakpoints(
frame->function_index(), VectorOf(&offset, 1),
VectorOf(&byte_offset, 1));
@@ -579,37 +554,55 @@ class DebugInfoImpl {
return_location = kAfterWasmCall;
}
- FloodWithBreakpoints(frame, isolate, return_location);
- stepping_frame_ = frame->id();
+ FloodWithBreakpoints(frame, return_location);
+
+ base::MutexGuard guard(&mutex_);
+ per_isolate_data_[isolate].stepping_frame = frame->id();
}
- void ClearStepping() { stepping_frame_ = NO_ID; }
+ void ClearStepping(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ auto it = per_isolate_data_.find(isolate);
+ if (it != per_isolate_data_.end()) it->second.stepping_frame = NO_ID;
+ }
bool IsStepping(WasmFrame* frame) {
Isolate* isolate = frame->wasm_instance().GetIsolate();
- StepAction last_step_action = isolate->debug()->last_step_action();
- return stepping_frame_ == frame->id() || last_step_action == StepIn;
+ if (isolate->debug()->last_step_action() == StepIn) return true;
+ base::MutexGuard guard(&mutex_);
+ auto it = per_isolate_data_.find(isolate);
+ return it != per_isolate_data_.end() &&
+ it->second.stepping_frame == frame->id();
}
- void RemoveBreakpoint(int func_index, int position,
- Isolate* current_isolate) {
- std::vector<int> breakpoints_copy;
- {
- base::MutexGuard guard(&mutex_);
- const auto& function = native_module_->module()->functions[func_index];
- int offset = position - function.code.offset();
-
- std::vector<int>& breakpoints = breakpoints_per_function_[func_index];
- DCHECK_LT(0, offset);
- auto insertion_point =
- std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
- if (insertion_point == breakpoints.end()) return;
- if (*insertion_point != offset) return;
- breakpoints.erase(insertion_point);
- breakpoints_copy = breakpoints;
- }
+ void RemoveBreakpoint(int func_index, int position, Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
- UpdateBreakpoints(func_index, VectorOf(breakpoints_copy), current_isolate);
+ // Hold the mutex while modifying breakpoints, to ensure consistency when
+ // multiple isolates set/remove breakpoints at the same time.
+ base::MutexGuard guard(&mutex_);
+
+ const auto& function = native_module_->module()->functions[func_index];
+ int offset = position - function.code.offset();
+
+ auto& isolate_data = per_isolate_data_[isolate];
+ std::vector<int>& breakpoints =
+ isolate_data.breakpoints_per_function[func_index];
+ DCHECK_LT(0, offset);
+ auto insertion_point =
+ std::lower_bound(breakpoints.begin(), breakpoints.end(), offset);
+ if (insertion_point == breakpoints.end()) return;
+ if (*insertion_point != offset) return;
+ breakpoints.erase(insertion_point);
+
+ std::vector<int> remaining = FindAllBreakpoints(func_index);
+ // If the breakpoint is still set in another isolate, don't remove it.
+ DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
+ if (std::binary_search(remaining.begin(), remaining.end(), offset)) return;
+ UpdateBreakpoints(func_index, VectorOf(remaining), isolate,
+ isolate_data.stepping_frame);
}
void RemoveDebugSideTables(Vector<WasmCode* const> codes) {
@@ -619,15 +612,55 @@ class DebugInfoImpl {
}
}
+ DebugSideTable* GetDebugSideTableIfExists(const WasmCode* code) const {
+ base::MutexGuard guard(&mutex_);
+ auto it = debug_side_tables_.find(code);
+ return it == debug_side_tables_.end() ? nullptr : it->second.get();
+ }
+
+ static bool HasRemovedBreakpoints(const std::vector<int>& removed,
+ const std::vector<int>& remaining) {
+ DCHECK(std::is_sorted(remaining.begin(), remaining.end()));
+ for (int offset : removed) {
+ // Return true if we removed a breakpoint which is not part of remaining.
+ if (!std::binary_search(remaining.begin(), remaining.end(), offset)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void RemoveIsolate(Isolate* isolate) {
+ // Put the code ref scope outside of the mutex, so we don't unnecessarily
+ // hold the mutex while freeing code.
+ WasmCodeRefScope wasm_code_ref_scope;
+
+ base::MutexGuard guard(&mutex_);
+ auto per_isolate_data_it = per_isolate_data_.find(isolate);
+ if (per_isolate_data_it == per_isolate_data_.end()) return;
+ std::unordered_map<int, std::vector<int>> removed_per_function =
+ std::move(per_isolate_data_it->second.breakpoints_per_function);
+ per_isolate_data_.erase(per_isolate_data_it);
+ for (auto& entry : removed_per_function) {
+ int func_index = entry.first;
+ std::vector<int>& removed = entry.second;
+ std::vector<int> remaining = FindAllBreakpoints(func_index);
+ if (HasRemovedBreakpoints(removed, remaining)) {
+ RecompileLiftoffWithBreakpoints(func_index, VectorOf(remaining), {});
+ }
+ }
+ }
+
private:
struct FrameInspectionScope {
- FrameInspectionScope(DebugInfoImpl* debug_info, Isolate* isolate,
- Address pc)
- : code(isolate->wasm_engine()->code_manager()->LookupCode(pc)),
+ FrameInspectionScope(DebugInfoImpl* debug_info, Address pc)
+ : code(debug_info->native_module_->engine()->code_manager()->LookupCode(
+ pc)),
pc_offset(static_cast<int>(pc - code->instruction_start())),
debug_side_table(
code->is_inspectable()
- ? debug_info->GetDebugSideTable(code, isolate->allocator())
+ ? debug_info->GetDebugSideTable(
+ code, debug_info->native_module_->engine()->allocator())
: nullptr),
debug_side_table_entry(debug_side_table
? debug_side_table->GetEntry(pc_offset)
@@ -667,11 +700,17 @@ class DebugInfoImpl {
GenerateLiftoffDebugSideTable(allocator, &env, func_body);
DebugSideTable* ret = debug_side_table.get();
- // Install into cache and return.
+ // Check cache again, maybe another thread concurrently generated a debug
+ // side table already.
{
base::MutexGuard guard(&mutex_);
- debug_side_tables_[code] = std::move(debug_side_table);
+ auto& slot = debug_side_tables_[code];
+ if (slot != nullptr) return slot.get();
+ slot = std::move(debug_side_table);
}
+
+ // Print the code together with the debug table, if requested.
+ code->MaybePrint();
return ret;
}
@@ -741,15 +780,15 @@ class DebugInfoImpl {
// After installing a Liftoff code object with a different set of breakpoints,
// update return addresses on the stack so that execution resumes in the new
// code. The frame layout itself should be independent of breakpoints.
- // TODO(thibaudm): update other threads as well.
- void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code) {
+ void UpdateReturnAddresses(Isolate* isolate, WasmCode* new_code,
+ StackFrameId stepping_frame) {
// The first return location is after the breakpoint, others are after wasm
// calls.
ReturnLocation return_location = kAfterBreakpoint;
for (StackTraceFrameIterator it(isolate); !it.done();
it.Advance(), return_location = kAfterWasmCall) {
// We still need the flooded function for stepping.
- if (it.frame()->id() == stepping_frame_) continue;
+ if (it.frame()->id() == stepping_frame) continue;
if (!it.is_wasm()) continue;
WasmFrame* frame = WasmFrame::cast(it.frame());
if (frame->native_module() != new_code->native_module()) continue;
@@ -788,25 +827,32 @@ class DebugInfoImpl {
return static_cast<size_t>(position) == code.end_offset() - 1;
}
+ // Isolate-specific data, for debugging modules that are shared by multiple
+ // isolates.
+ struct PerIsolateDebugData {
+ // Keeps track of the currently set breakpoints (by offset within that
+ // function).
+ std::unordered_map<int, std::vector<int>> breakpoints_per_function;
+
+ // Store the frame ID when stepping, to avoid overwriting that frame when
+ // setting or removing a breakpoint.
+ StackFrameId stepping_frame = NO_ID;
+ };
+
NativeModule* const native_module_;
// {mutex_} protects all fields below.
mutable base::Mutex mutex_;
// DebugSideTable per code object, lazily initialized.
- std::unordered_map<WasmCode*, std::unique_ptr<DebugSideTable>>
+ std::unordered_map<const WasmCode*, std::unique_ptr<DebugSideTable>>
debug_side_tables_;
// Names of locals, lazily decoded from the wire bytes.
std::unique_ptr<LocalNames> local_names_;
- // Keeps track of the currently set breakpoints (by offset within that
- // function).
- std::unordered_map<int, std::vector<int>> breakpoints_per_function_;
-
- // Store the frame ID when stepping, to avoid overwriting that frame when
- // setting or removing a breakpoint.
- StackFrameId stepping_frame_ = NO_ID;
+ // Isolate-specific data.
+ std::unordered_map<Isolate*, PerIsolateDebugData> per_isolate_data_;
DISALLOW_COPY_AND_ASSIGN(DebugInfoImpl);
};
@@ -816,22 +862,18 @@ DebugInfo::DebugInfo(NativeModule* native_module)
DebugInfo::~DebugInfo() = default;
-int DebugInfo::GetNumLocals(Isolate* isolate, Address pc) {
- return impl_->GetNumLocals(isolate, pc);
-}
+int DebugInfo::GetNumLocals(Address pc) { return impl_->GetNumLocals(pc); }
-WasmValue DebugInfo::GetLocalValue(int local, Isolate* isolate, Address pc,
- Address fp, Address debug_break_fp) {
- return impl_->GetLocalValue(local, isolate, pc, fp, debug_break_fp);
+WasmValue DebugInfo::GetLocalValue(int local, Address pc, Address fp,
+ Address debug_break_fp) {
+ return impl_->GetLocalValue(local, pc, fp, debug_break_fp);
}
-int DebugInfo::GetStackDepth(Isolate* isolate, Address pc) {
- return impl_->GetStackDepth(isolate, pc);
-}
+int DebugInfo::GetStackDepth(Address pc) { return impl_->GetStackDepth(pc); }
-WasmValue DebugInfo::GetStackValue(int index, Isolate* isolate, Address pc,
- Address fp, Address debug_break_fp) {
- return impl_->GetStackValue(index, isolate, pc, fp, debug_break_fp);
+WasmValue DebugInfo::GetStackValue(int index, Address pc, Address fp,
+ Address debug_break_fp) {
+ return impl_->GetStackValue(index, pc, fp, debug_break_fp);
}
Handle<JSObject> DebugInfo::GetLocalScopeObject(Isolate* isolate, Address pc,
@@ -859,7 +901,9 @@ void DebugInfo::PrepareStep(Isolate* isolate, StackFrameId break_frame_id) {
impl_->PrepareStep(isolate, break_frame_id);
}
-void DebugInfo::ClearStepping() { impl_->ClearStepping(); }
+void DebugInfo::ClearStepping(Isolate* isolate) {
+ impl_->ClearStepping(isolate);
+}
bool DebugInfo::IsStepping(WasmFrame* frame) {
return impl_->IsStepping(frame);
@@ -874,65 +918,16 @@ void DebugInfo::RemoveDebugSideTables(Vector<WasmCode* const> code) {
impl_->RemoveDebugSideTables(code);
}
-} // namespace wasm
-
-Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
- DCHECK(!instance->has_debug_info());
- Factory* factory = instance->GetIsolate()->factory();
- Handle<Cell> stack_cell = factory->NewCell(factory->empty_fixed_array());
- Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(
- factory->NewStruct(WASM_DEBUG_INFO_TYPE, AllocationType::kOld));
- debug_info->set_wasm_instance(*instance);
- debug_info->set_interpreter_reference_stack(*stack_cell);
- instance->set_debug_info(*debug_info);
- return debug_info;
+DebugSideTable* DebugInfo::GetDebugSideTableIfExists(
+ const WasmCode* code) const {
+ return impl_->GetDebugSideTableIfExists(code);
}
-wasm::WasmInterpreter* WasmDebugInfo::SetupForTesting(
- Handle<WasmInstanceObject> instance_obj) {
- Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
- Isolate* isolate = instance_obj->GetIsolate();
- // Use the maximum stack size to estimate the maximum size of the interpreter.
- // The interpreter keeps its own stack internally, and the size of the stack
- // should dominate the overall size of the interpreter. We multiply by '2' to
- // account for the growing strategy for the backing store of the stack.
- size_t interpreter_size = FLAG_stack_size * KB * 2;
- auto interp_handle = Managed<wasm::InterpreterHandle>::Allocate(
- isolate, interpreter_size, isolate, debug_info);
- debug_info->set_interpreter_handle(*interp_handle);
- return interp_handle->raw()->interpreter();
+void DebugInfo::RemoveIsolate(Isolate* isolate) {
+ return impl_->RemoveIsolate(isolate);
}
-// static
-Handle<Code> WasmDebugInfo::GetCWasmEntry(Handle<WasmDebugInfo> debug_info,
- const wasm::FunctionSig* sig) {
- Isolate* isolate = debug_info->GetIsolate();
- DCHECK_EQ(debug_info->has_c_wasm_entries(),
- debug_info->has_c_wasm_entry_map());
- if (!debug_info->has_c_wasm_entries()) {
- auto entries = isolate->factory()->NewFixedArray(4, AllocationType::kOld);
- debug_info->set_c_wasm_entries(*entries);
- size_t map_size = 0; // size estimate not so important here.
- auto managed_map = Managed<wasm::SignatureMap>::Allocate(isolate, map_size);
- debug_info->set_c_wasm_entry_map(*managed_map);
- }
- Handle<FixedArray> entries(debug_info->c_wasm_entries(), isolate);
- wasm::SignatureMap* map = debug_info->c_wasm_entry_map().raw();
- int32_t index = map->Find(*sig);
- if (index == -1) {
- index = static_cast<int32_t>(map->FindOrInsert(*sig));
- if (index == entries->length()) {
- entries =
- isolate->factory()->CopyFixedArrayAndGrow(entries, entries->length());
- debug_info->set_c_wasm_entries(*entries);
- }
- DCHECK(entries->get(index).IsUndefined(isolate));
- Handle<Code> new_entry_code =
- compiler::CompileCWasmEntry(isolate, sig).ToHandleChecked();
- entries->set(index, *new_entry_code);
- }
- return handle(Code::cast(entries->get(index)), isolate);
-}
+} // namespace wasm
namespace {
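
Breakpoints are now tracked per isolate, and FindAllBreakpoints computes the union across all isolates before recompiling a function. A self-contained sketch of that merge, with IsolateId standing in for Isolate* (the map layout mirrors per_isolate_data_ but is illustrative):

    #include <map>
    #include <set>
    #include <unordered_map>
    #include <vector>

    using IsolateId = int;

    std::vector<int> FindAllBreakpointsSketch(
        const std::map<IsolateId,
                       std::unordered_map<int, std::vector<int>>>& per_isolate,
        int func_index) {
      std::set<int> offsets;  // ordered set gives sorted, deduplicated output
      for (const auto& entry : per_isolate) {
        const auto& per_function = entry.second;
        auto it = per_function.find(func_index);
        if (it == per_function.end()) continue;
        offsets.insert(it->second.begin(), it->second.end());
      }
      return {offsets.begin(), offsets.end()};
    }
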
diff --git a/chromium/v8/src/wasm/wasm-debug.h b/chromium/v8/src/wasm/wasm-debug.h
index 1eacd6ff526..6050cb3a58b 100644
--- a/chromium/v8/src/wasm/wasm-debug.h
+++ b/chromium/v8/src/wasm/wasm-debug.h
@@ -91,6 +91,8 @@ class DebugSideTable {
return values_[index].reg_code;
}
+ void Print(std::ostream&) const;
+
private:
int pc_offset_;
std::vector<Value> values_;
@@ -120,6 +122,8 @@ class DebugSideTable {
int num_locals() const { return num_locals_; }
+ void Print(std::ostream&) const;
+
private:
struct EntryPositionLess {
bool operator()(const Entry& a, const Entry& b) const {
@@ -145,11 +149,11 @@ class V8_EXPORT_PRIVATE DebugInfo {
// For the frame inspection methods below:
// {fp} is the frame pointer of the Liftoff frame, {debug_break_fp} that of
// the {WasmDebugBreak} frame (if any).
- int GetNumLocals(Isolate*, Address pc);
- WasmValue GetLocalValue(int local, Isolate*, Address pc, Address fp,
+ int GetNumLocals(Address pc);
+ WasmValue GetLocalValue(int local, Address pc, Address fp,
Address debug_break_fp);
- int GetStackDepth(Isolate*, Address pc);
- WasmValue GetStackValue(int index, Isolate*, Address pc, Address fp,
+ int GetStackDepth(Address pc);
+ WasmValue GetStackValue(int index, Address pc, Address fp,
Address debug_break_fp);
Handle<JSObject> GetLocalScopeObject(Isolate*, Address pc, Address fp,
@@ -164,7 +168,7 @@ class V8_EXPORT_PRIVATE DebugInfo {
void PrepareStep(Isolate*, StackFrameId);
- void ClearStepping();
+ void ClearStepping(Isolate*);
bool IsStepping(WasmFrame*);
@@ -172,6 +176,12 @@ class V8_EXPORT_PRIVATE DebugInfo {
void RemoveDebugSideTables(Vector<WasmCode* const>);
+ // Return the debug side table for the given code object, but only if it has
+ // already been created. This will never trigger generation of the table.
+ DebugSideTable* GetDebugSideTableIfExists(const WasmCode*) const;
+
+ void RemoveIsolate(Isolate*);
+
private:
std::unique_ptr<DebugInfoImpl> impl_;
};
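
GetDebugSideTableIfExists only reads the cache; the lazy creation path in DebugInfoImpl::GetDebugSideTable builds the table without holding the lock and re-checks the cache before installing it, since another thread may have generated the same table concurrently. A self-contained sketch of that pattern (Cache and Table are stand-in types):

    #include <memory>
    #include <mutex>
    #include <unordered_map>

    struct Table { /* expensive to build */ };

    class Cache {
     public:
      Table* GetOrCreate(int key) {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          auto it = tables_.find(key);
          if (it != tables_.end()) return it->second.get();
        }
        // Build without holding the lock; this may race with other threads.
        auto table = std::make_unique<Table>();
        Table* result = table.get();
        std::lock_guard<std::mutex> lock(mutex_);
        auto& slot = tables_[key];
        if (slot != nullptr) return slot.get();  // another thread won the race
        slot = std::move(table);
        return result;
      }
     private:
      std::mutex mutex_;
      std::unordered_map<int, std::unique_ptr<Table>> tables_;
    };
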
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index 324d1b1d49c..133825122fd 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -22,6 +22,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
#include "src/wasm/streaming-decoder.h"
+#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -54,7 +55,7 @@ class LogCodesTask : public Task {
DCHECK_NOT_NULL(isolate);
}
- ~LogCodesTask() {
+ ~LogCodesTask() override {
// If the platform deletes this task before executing it, we also deregister
// it to avoid use-after-free from still-running background threads.
if (!cancelled()) DeregisterTask();
@@ -343,9 +344,8 @@ struct WasmEngine::IsolateInfo {
}
#endif
- // All native modules that are being used by this Isolate (currently only
- // grows, never shrinks).
- std::set<NativeModule*> native_modules;
+ // All native modules that are being used by this Isolate.
+ std::unordered_map<NativeModule*, std::weak_ptr<NativeModule>> native_modules;
// Scripts created for each native module in this isolate.
std::unordered_map<NativeModule*, WeakScriptHandle> scripts;
@@ -409,6 +409,7 @@ WasmEngine::~WasmEngine() {
bool WasmEngine::SyncValidate(Isolate* isolate, const WasmFeatures& enabled,
const ModuleWireBytes& bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncValidate");
// TODO(titzer): remove dependency on the isolate.
if (bytes.start() == nullptr || bytes.length() == 0) return false;
ModuleResult result =
@@ -421,6 +422,7 @@ MaybeHandle<AsmWasmData> WasmEngine::SyncCompileTranslatedAsmJs(
Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
Vector<const byte> asm_js_offset_table_bytes,
Handle<HeapNumber> uses_bitset, LanguageMode language_mode) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncCompileTranslatedAsmJs");
ModuleOrigin origin = language_mode == LanguageMode::kSloppy
? kAsmJsSloppyOrigin
: kAsmJsStrictOrigin;
@@ -464,6 +466,7 @@ Handle<WasmModuleObject> WasmEngine::FinalizeTranslatedAsmJs(
MaybeHandle<WasmModuleObject> WasmEngine::SyncCompile(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
const ModuleWireBytes& bytes) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncCompile");
ModuleResult result =
DecodeWasmModule(enabled, bytes.start(), bytes.end(), false, kWasmOrigin,
isolate->counters(), allocator());
@@ -509,6 +512,7 @@ MaybeHandle<WasmInstanceObject> WasmEngine::SyncInstantiate(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory) {
+ TRACE_EVENT0("v8.wasm", "wasm.SyncInstantiate");
return InstantiateToInstanceObject(isolate, thrower, module_object, imports,
memory);
}
@@ -517,6 +521,7 @@ void WasmEngine::AsyncInstantiate(
Isolate* isolate, std::unique_ptr<InstantiationResultResolver> resolver,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports) {
ErrorThrower thrower(isolate, "WebAssembly.instantiate()");
+ TRACE_EVENT0("v8.wasm", "wasm.AsyncInstantiate");
// Instantiate a TryCatch so that caught exceptions won't propagate out.
// They will still be set as pending exceptions on the isolate.
// TODO(clemensb): Avoid TryCatch, use Execution::TryCall internally to invoke
@@ -552,6 +557,7 @@ void WasmEngine::AsyncCompile(
std::shared_ptr<CompilationResultResolver> resolver,
const ModuleWireBytes& bytes, bool is_shared,
const char* api_method_name_for_errors) {
+ TRACE_EVENT0("v8.wasm", "wasm.AsyncCompile");
if (!FLAG_wasm_async_compilation) {
// Asynchronous compilation disabled; fall back on synchronous compilation.
ErrorThrower thrower(isolate, api_method_name_for_errors);
@@ -600,10 +606,15 @@ std::shared_ptr<StreamingDecoder> WasmEngine::StartStreamingCompilation(
Isolate* isolate, const WasmFeatures& enabled, Handle<Context> context,
const char* api_method_name,
std::shared_ptr<CompilationResultResolver> resolver) {
- AsyncCompileJob* job =
- CreateAsyncCompileJob(isolate, enabled, std::unique_ptr<byte[]>(nullptr),
- 0, context, api_method_name, std::move(resolver));
- return job->CreateStreamingDecoder();
+ TRACE_EVENT0("v8.wasm", "wasm.StartStreamingCompilation");
+ if (FLAG_wasm_async_compilation) {
+ AsyncCompileJob* job = CreateAsyncCompileJob(
+ isolate, enabled, std::unique_ptr<byte[]>(nullptr), 0, context,
+ api_method_name, std::move(resolver));
+ return job->CreateStreamingDecoder();
+ }
+ return StreamingDecoder::CreateSyncStreamingDecoder(
+ isolate, enabled, context, api_method_name, std::move(resolver));
}
void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
@@ -616,25 +627,27 @@ void WasmEngine::CompileFunction(Isolate* isolate, NativeModule* native_module,
}
void WasmEngine::TierDownAllModulesPerIsolate(Isolate* isolate) {
- std::vector<NativeModule*> native_modules;
+ std::vector<std::shared_ptr<NativeModule>> native_modules;
{
base::MutexGuard lock(&mutex_);
if (isolates_[isolate]->keep_tiered_down) return;
isolates_[isolate]->keep_tiered_down = true;
- for (auto* native_module : isolates_[isolate]->native_modules) {
- native_modules.push_back(native_module);
- native_module->SetTieringState(kTieredDown);
+ for (auto& entry : isolates_[isolate]->native_modules) {
+ entry.first->SetTieringState(kTieredDown);
+ if (auto shared_ptr = entry.second.lock()) {
+ native_modules.emplace_back(std::move(shared_ptr));
+ }
}
}
- for (auto* native_module : native_modules) {
- native_module->TriggerRecompilation();
+ for (auto& native_module : native_modules) {
+ native_module->RecompileForTiering();
}
}
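
The pattern above (and in TierUpAllModulesPerIsolate below) is worth spelling out: the per-isolate map now stores weak_ptrs, which are upgraded to shared_ptrs while the engine mutex is held, so the expensive recompilation can run after the lock is released without racing destruction of the NativeModule. A minimal standalone sketch of that idiom, with illustrative names rather than the real V8 types:

#include <memory>
#include <mutex>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for NativeModule; only the weak_ptr pattern matters.
struct Module {
  void RecompileForTiering() {}
};

class Registry {
 public:
  void Add(const std::shared_ptr<Module>& m) {
    std::lock_guard<std::mutex> lock(mutex_);
    modules_.emplace(m.get(), m);  // key: raw pointer, value: weak_ptr
  }

  void RecompileAll() {
    std::vector<std::shared_ptr<Module>> alive;
    {
      std::lock_guard<std::mutex> lock(mutex_);
      for (auto& entry : modules_) {
        // lock() returns null if the module died concurrently; skip those.
        if (auto strong = entry.second.lock()) alive.push_back(std::move(strong));
      }
    }
    // The expensive work runs outside the mutex, so there is no lock-order
    // inversion with locks taken inside RecompileForTiering().
    for (auto& m : alive) m->RecompileForTiering();
  }

 private:
  std::mutex mutex_;
  std::unordered_map<Module*, std::weak_ptr<Module>> modules_;
};
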
void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
// Only trigger recompilation after releasing the mutex, otherwise we risk
// deadlocks because of lock inversion.
- std::vector<NativeModule*> native_modules_to_recompile;
+ std::vector<std::shared_ptr<NativeModule>> native_modules_to_recompile;
{
base::MutexGuard lock(&mutex_);
isolates_[isolate]->keep_tiered_down = false;
@@ -646,17 +659,20 @@ void WasmEngine::TierUpAllModulesPerIsolate(Isolate* isolate) {
}
return false;
};
- for (auto* native_module : isolates_[isolate]->native_modules) {
+ for (auto& entry : isolates_[isolate]->native_modules) {
+ auto* native_module = entry.first;
if (!native_module->IsTieredDown()) continue;
      // Only start tier-up if no other isolate needs this module in tiered
// down state.
if (test_keep_tiered_down(native_module)) continue;
native_module->SetTieringState(kTieredUp);
- native_modules_to_recompile.push_back(native_module);
+ if (auto shared_ptr = entry.second.lock()) {
+ native_modules_to_recompile.emplace_back(std::move(shared_ptr));
+ }
}
}
- for (auto* native_module : native_modules_to_recompile) {
- native_module->TriggerRecompilation();
+ for (auto& native_module : native_modules_to_recompile) {
+ native_module->RecompileForTiering();
}
}
@@ -762,11 +778,12 @@ Handle<WasmModuleObject> WasmEngine::ImportNativeModule(
Handle<FixedArray> export_wrappers;
CompileJsToWasmWrappers(isolate, native_module->module(), &export_wrappers);
Handle<WasmModuleObject> module_object = WasmModuleObject::New(
- isolate, std::move(shared_native_module), script, export_wrappers);
+ isolate, shared_native_module, script, export_wrappers);
{
base::MutexGuard lock(&mutex_);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_[isolate]->native_modules.insert(native_module);
+ isolates_[isolate]->native_modules.emplace(native_module,
+ std::move(shared_native_module));
DCHECK_EQ(1, native_modules_.count(native_module));
native_modules_[native_module]->isolates.insert(isolate);
}
@@ -885,8 +902,8 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
WasmEngine* engine = isolate->wasm_engine();
base::MutexGuard lock(&engine->mutex_);
DCHECK_EQ(1, engine->isolates_.count(isolate));
- for (auto* native_module : engine->isolates_[isolate]->native_modules) {
- native_module->SampleCodeSize(counters, NativeModule::kSampling);
+ for (auto& entry : engine->isolates_[isolate]->native_modules) {
+ entry.first->SampleCodeSize(counters, NativeModule::kSampling);
}
};
isolate->heap()->AddGCEpilogueCallback(callback, v8::kGCTypeMarkSweepCompact,
@@ -910,7 +927,8 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
DCHECK_NE(isolates_.end(), it);
std::unique_ptr<IsolateInfo> info = std::move(it->second);
isolates_.erase(it);
- for (NativeModule* native_module : info->native_modules) {
+ for (auto& entry : info->native_modules) {
+ auto* native_module = entry.first;
DCHECK_EQ(1, native_modules_.count(native_module));
DCHECK_EQ(1, native_modules_[native_module]->isolates.count(isolate));
auto* info = native_modules_[native_module].get();
@@ -920,6 +938,9 @@ void WasmEngine::RemoveIsolate(Isolate* isolate) {
current_gc_info_->dead_code.erase(code);
}
}
+ if (native_module->HasDebugInfo()) {
+ native_module->GetDebugInfo()->RemoveIsolate(isolate);
+ }
}
if (current_gc_info_) {
if (RemoveIsolateFromCurrentGC(isolate)) PotentiallyFinishCurrentGC();
@@ -1002,7 +1023,7 @@ std::shared_ptr<NativeModule> WasmEngine::NewNativeModule(
DCHECK(pair.second); // inserted new entry.
pair.first->second.get()->isolates.insert(isolate);
auto& modules_per_isolate = isolates_[isolate]->native_modules;
- modules_per_isolate.insert(native_module.get());
+ modules_per_isolate.emplace(native_module.get(), native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
}
@@ -1025,14 +1046,15 @@ std::shared_ptr<NativeModule> WasmEngine::MaybeGetNativeModule(
native_module_info = std::make_unique<NativeModuleInfo>();
}
native_module_info->isolates.insert(isolate);
- isolates_[isolate]->native_modules.insert(native_module.get());
+ isolates_[isolate]->native_modules.emplace(native_module.get(),
+ native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->SetTieringState(kTieredDown);
recompile_module = true;
}
}
// Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->TriggerRecompilation();
+ if (recompile_module) native_module->RecompileForTiering();
return native_module;
}
@@ -1054,14 +1076,15 @@ bool WasmEngine::UpdateNativeModuleCache(
DCHECK_EQ(1, native_modules_.count(native_module->get()));
native_modules_[native_module->get()]->isolates.insert(isolate);
DCHECK_EQ(1, isolates_.count(isolate));
- isolates_[isolate]->native_modules.insert(native_module->get());
+ isolates_[isolate]->native_modules.emplace(native_module->get(),
+ *native_module);
if (isolates_[isolate]->keep_tiered_down) {
native_module->get()->SetTieringState(kTieredDown);
recompile_module = true;
}
}
// Potentially recompile the module for tier down, after releasing the mutex.
- if (recompile_module) native_module->get()->TriggerRecompilation();
+ if (recompile_module) native_module->get()->RecompileForTiering();
return false;
}
@@ -1154,7 +1177,7 @@ void WasmEngine::SampleTopTierCodeSizeInAllIsolates(
void WasmEngine::ReportLiveCodeForGC(Isolate* isolate,
Vector<WasmCode*> live_code) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "ReportLiveCodeForGC");
+ TRACE_EVENT0("v8.wasm", "wasm.ReportLiveCodeForGC");
TRACE_CODE_GC("Isolate %d reporting %zu live code objects.\n", isolate->id(),
live_code.size());
base::MutexGuard guard(&mutex_);
@@ -1227,7 +1250,7 @@ void WasmEngine::FreeDeadCode(const DeadCodeMap& dead_code) {
}
void WasmEngine::FreeDeadCodeLocked(const DeadCodeMap& dead_code) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "FreeDeadCode");
+ TRACE_EVENT0("v8.wasm", "wasm.FreeDeadCode");
DCHECK(!mutex_.TryLock());
for (auto& dead_code_entry : dead_code) {
NativeModule* native_module = dead_code_entry.first;
diff --git a/chromium/v8/src/wasm/wasm-external-refs.cc b/chromium/v8/src/wasm/wasm-external-refs.cc
index 6dbb9393849..43617a8599a 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.cc
+++ b/chromium/v8/src/wasm/wasm-external-refs.cc
@@ -230,6 +230,82 @@ int32_t float64_to_uint64_wrapper(Address data) {
return 0;
}
+void float32_to_int64_sat_wrapper(Address data) {
+ float input = ReadUnalignedValue<float>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (input < static_cast<float>(std::numeric_limits<int64_t>::max()) &&
+ input >= static_cast<float>(std::numeric_limits<int64_t>::min())) {
+ WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
+ return;
+ }
+ if (std::isnan(input)) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return;
+ }
+ if (input < 0.0) {
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::min());
+ return;
+ }
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::max());
+}
+
+void float32_to_uint64_sat_wrapper(Address data) {
+ float input = ReadUnalignedValue<float>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (input < static_cast<float>(std::numeric_limits<uint64_t>::max()) &&
+ input >= 0.0) {
+ WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
+ return;
+ }
+ if (input >= std::numeric_limits<uint64_t>::max()) {
+ WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
+ return;
+ }
+ WriteUnalignedValue<uint64_t>(data, 0);
+}
+
+void float64_to_int64_sat_wrapper(Address data) {
+ double input = ReadUnalignedValue<double>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within int64 range which are actually
+ // not within int64 range.
+ if (input < static_cast<double>(std::numeric_limits<int64_t>::max()) &&
+ input >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
+ WriteUnalignedValue<int64_t>(data, static_cast<int64_t>(input));
+ return;
+ }
+ if (std::isnan(input)) {
+ WriteUnalignedValue<int64_t>(data, 0);
+ return;
+ }
+ if (input < 0.0) {
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::min());
+ return;
+ }
+ WriteUnalignedValue<int64_t>(data, std::numeric_limits<int64_t>::max());
+}
+
+void float64_to_uint64_sat_wrapper(Address data) {
+ double input = ReadUnalignedValue<double>(data);
+ // We use "<" here to check the upper bound because of rounding problems: With
+ // "<=" some inputs would be considered within uint64 range which are actually
+ // not within uint64 range.
+ if (input < static_cast<double>(std::numeric_limits<uint64_t>::max()) &&
+ input >= 0.0) {
+ WriteUnalignedValue<uint64_t>(data, static_cast<uint64_t>(input));
+ return;
+ }
+ if (input >= std::numeric_limits<uint64_t>::max()) {
+ WriteUnalignedValue<uint64_t>(data, std::numeric_limits<uint64_t>::max());
+ return;
+ }
+ WriteUnalignedValue<uint64_t>(data, 0);
+}
+
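
To make the "<" versus "<=" comments in these wrappers concrete: on the usual IEEE-754, round-to-nearest platforms, casting INT64_MAX (2^63 - 1) to float rounds up to exactly 2^63, which is already outside int64 range, so an input equal to that float has to saturate. A small standalone illustration (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  // On the usual IEEE-754, round-to-nearest platforms this rounds INT64_MAX
  // (2^63 - 1) up to exactly 2^63, which is already outside int64 range.
  const float upper = static_cast<float>(std::numeric_limits<int64_t>::max());
  const float input = 9223372036854775808.0f;  // 2^63, equal to 'upper' here.
  std::printf("float(INT64_MAX) = %.1f\n", static_cast<double>(upper));
  // "<" correctly saturates for this input; "<=" would let it reach the
  // int64 cast, which overflows.
  std::printf("accepted by '<' : %d\n", input < upper);
  std::printf("accepted by '<=': %d\n", input <= upper);
}
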
int32_t int64_div_wrapper(Address data) {
int64_t dividend = ReadUnalignedValue<int64_t>(data);
int64_t divisor = ReadUnalignedValue<int64_t>(data + sizeof(dividend));
@@ -325,6 +401,28 @@ void float64_pow_wrapper(Address data) {
WriteUnalignedValue<double>(data, base::ieee754::pow(x, y));
}
+template <typename T, T (*float_round_op)(T)>
+void simd_float_round_wrapper(Address data) {
+ constexpr int n = kSimd128Size / sizeof(T);
+ for (int i = 0; i < n; i++) {
+ WriteUnalignedValue<T>(
+ data + (i * sizeof(T)),
+ float_round_op(ReadUnalignedValue<T>(data + (i * sizeof(T)))));
+ }
+}
+
+void f32x4_ceil_wrapper(Address data) {
+ simd_float_round_wrapper<float, &ceilf>(data);
+}
+
+void f32x4_floor_wrapper(Address data) {
+ simd_float_round_wrapper<float, &floorf>(data);
+}
+
+void f32x4_trunc_wrapper(Address data) {
+ simd_float_round_wrapper<float, &truncf>(data);
+}
+
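
The simd_float_round_wrapper template above simply treats the 16-byte Simd128 buffer as four float lanes and applies the scalar rounding function to each lane in place. A self-contained sketch of that lanewise pattern, using memcpy in place of V8's ReadUnalignedValue/WriteUnalignedValue helpers (names are illustrative only):

#include <cmath>
#include <cstdint>
#include <cstring>

constexpr int kSimd128Bytes = 16;

float CeilF(float x) { return std::ceil(x); }

// Apply a scalar op to each float lane of a 16-byte Simd128 buffer, in place.
template <float (*op)(float)>
void ForEachF32Lane(uint8_t* data) {
  constexpr int kLanes = kSimd128Bytes / static_cast<int>(sizeof(float));  // 4
  for (int i = 0; i < kLanes; ++i) {
    float lane;
    std::memcpy(&lane, data + i * sizeof(float), sizeof(float));
    lane = op(lane);
    std::memcpy(data + i * sizeof(float), &lane, sizeof(float));
  }
}

void F32x4CeilExample(uint8_t* data) { ForEachF32Lane<&CeilF>(data); }
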
namespace {
class ThreadNotInWasmScope {
// Asan on Windows triggers exceptions to allocate shadow memory lazily. When
@@ -402,13 +500,13 @@ int32_t memory_init_wrapper(Address data) {
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t seg_index = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
- size_t seg_size = instance.data_segment_sizes()[seg_index];
- if (!base::IsInBounds(src, size, seg_size)) return kOutOfBounds;
+ uint32_t seg_size = instance.data_segment_sizes()[seg_index];
+ if (!base::IsInBounds<uint32_t>(src, size, seg_size)) return kOutOfBounds;
byte* seg_start =
reinterpret_cast<byte*>(instance.data_segment_starts()[seg_index]);
@@ -427,11 +525,11 @@ int32_t memory_copy_wrapper(Address data) {
WasmInstanceObject instance = WasmInstanceObject::cast(raw_instance);
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint32_t src = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
- if (!base::IsInBounds(src, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
+ if (!base::IsInBounds<uint64_t>(src, size, mem_size)) return kOutOfBounds;
// Use std::memmove, because the ranges can overlap.
std::memmove(EffectiveAddress(instance, dst), EffectiveAddress(instance, src),
@@ -452,10 +550,10 @@ int32_t memory_fill_wrapper(Address data) {
uint32_t dst = ReadAndIncrementOffset<uint32_t>(data, &offset);
uint8_t value =
static_cast<uint8_t>(ReadAndIncrementOffset<uint32_t>(data, &offset));
- size_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
+ uint32_t size = ReadAndIncrementOffset<uint32_t>(data, &offset);
- size_t mem_size = instance.memory_size();
- if (!base::IsInBounds(dst, size, mem_size)) return kOutOfBounds;
+ uint64_t mem_size = instance.memory_size();
+ if (!base::IsInBounds<uint64_t>(dst, size, mem_size)) return kOutOfBounds;
std::memset(EffectiveAddress(instance, dst), value, size);
return kSuccess;
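
The memory_init/memory_copy/memory_fill changes above make the widths of the bounds checks explicit: dst, src and size arrive as 32-bit values from the instruction, while the memory size may not fit in 32 bits, so IsInBounds is instantiated for uint64_t rather than relying on size_t. A hedged sketch of an overflow-safe check of that shape (this is not the actual base::IsInBounds implementation):

#include <cstdint>

// Overflow-safe range check: is [offset, offset + size) inside [0, upper)?
// Written so that offset + size cannot wrap around in unsigned arithmetic.
template <typename T>
constexpr bool InBounds(T offset, T size, T upper) {
  return size <= upper && offset <= upper - size;
}

static_assert(InBounds<uint64_t>(0, 16, 65536), "small range is in bounds");
static_assert(!InBounds<uint64_t>(~uint64_t{0}, 1, 65536),
              "an offset that would wrap is rejected");
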
diff --git a/chromium/v8/src/wasm/wasm-external-refs.h b/chromium/v8/src/wasm/wasm-external-refs.h
index 0a2d5f30602..b41d44e4435 100644
--- a/chromium/v8/src/wasm/wasm-external-refs.h
+++ b/chromium/v8/src/wasm/wasm-external-refs.h
@@ -45,6 +45,14 @@ V8_EXPORT_PRIVATE int32_t float64_to_int64_wrapper(Address data);
V8_EXPORT_PRIVATE int32_t float64_to_uint64_wrapper(Address data);
+V8_EXPORT_PRIVATE void float32_to_int64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float32_to_uint64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float64_to_int64_sat_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void float64_to_uint64_sat_wrapper(Address data);
+
V8_EXPORT_PRIVATE int32_t int64_div_wrapper(Address data);
V8_EXPORT_PRIVATE int32_t int64_mod_wrapper(Address data);
@@ -71,6 +79,12 @@ V8_EXPORT_PRIVATE void word64_ror_wrapper(Address data);
V8_EXPORT_PRIVATE void float64_pow_wrapper(Address data);
+V8_EXPORT_PRIVATE void f32x4_ceil_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void f32x4_floor_wrapper(Address data);
+
+V8_EXPORT_PRIVATE void f32x4_trunc_wrapper(Address data);
+
// The return type is {int32_t} instead of {bool} to force the compiler to
// zero-extend the result in the return register.
int32_t memory_init_wrapper(Address data);
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index ab8eb612a85..2450608f141 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -33,7 +33,12 @@
/* Official proposal: https://github.com/WebAssembly/gc */ \
/* Prototype engineering spec: https://bit.ly/3cWcm6Q */ \
/* V8 side owner: jkummerow */ \
- V(gc, "garbage collection", false)
+ V(gc, "garbage collection", false) \
+ \
+ /* Typed function references proposal. */ \
+ /* Official proposal: https://github.com/WebAssembly/function-references */ \
+ /* V8 side owner: ahaas */ \
+ V(typed_funcref, "typed function references", false)
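
For readers unfamiliar with the idiom, each V(name, description, default) row in these FOREACH_*_FEATURE_FLAG lists is an X-macro entry that gets expanded several times with different definitions of V (flag declarations, a features struct, and so on). A generic, self-contained illustration of the pattern, not the actual V8 expansion:

#include <cstdio>

// A tiny stand-in for the FOREACH_*_FEATURE_FLAG lists: each V(name, desc,
// default) row is expanded repeatedly with different definitions of V.
#define FOREACH_DEMO_FLAG(V)                            \
  V(mv, "multi-value support", false)                   \
  V(typed_funcref, "typed function references", false)

struct Features {
#define DECL_FIELD(name, desc, dflt) bool name = dflt;
  FOREACH_DEMO_FLAG(DECL_FIELD)
#undef DECL_FIELD
};

int main() {
  Features f;
#define PRINT_FLAG(name, desc, dflt) \
  std::printf("%-14s (%s): %d\n", #name, desc, f.name);
  FOREACH_DEMO_FLAG(PRINT_FLAG)
#undef PRINT_FLAG
}
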
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -44,24 +49,18 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Reference Types, a.k.a. anyref proposal. */ \
- /* https://github.com/WebAssembly/reference-types */ \
- /* V8 side owner: ahaas */ \
- /* Staged in v7.8. */ \
- V(anyref, "anyref opcodes", false) \
- \
- /* JS BigInt to wasm i64 integration. */ \
- /* https://github.com/WebAssembly/JS-BigInt-integration */ \
- /* V8 side owner: ahaas, ssauleau@igalia.com */ \
- /* Staged in v7.9. */ \
- V(bigint, "JS BigInt support", false) \
- \
/* Multi-value proposal. */ \
/* https://github.com/WebAssembly/multi-value */ \
/* V8 side owner: thibaudm */ \
/* Staged in v8.0. */ \
V(mv, "multi-value support", false) \
\
+ /* Reference Types, a.k.a. reftypes proposal. */ \
+ /* https://github.com/WebAssembly/reference-types */ \
+ /* V8 side owner: ahaas */ \
+ /* Staged in v7.8. */ \
+ V(reftypes, "reference type opcodes", false) \
+ \
/* Threads proposal. */ \
/* https://github.com/webassembly/threads */ \
/* NOTE: This is enabled via chromium flag on desktop systems since v7.4 */ \
@@ -80,6 +79,14 @@
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
+ /* JS BigInt to wasm i64 integration. */ \
+ /* https://github.com/WebAssembly/JS-BigInt-integration */ \
+ /* V8 side owner: ahaas, ssauleau@igalia.com */ \
+ /* Shipped in v8.5. */ \
+ /* ITS: https://groups.google.com/a/chromium.org/g/blink-dev/c/ */ \
+ /* g4QKRUQV1-0/m/jdWjD1uZAAAJ */ \
+ V(bigint, "JS BigInt support", true) \
+ \
/* Bulk memory operations. */ \
/* https://github.com/webassembly/bulk-memory-operations */ \
/* V8 side owner: binji */ \
diff --git a/chromium/v8/src/wasm/wasm-interpreter.cc b/chromium/v8/src/wasm/wasm-interpreter.cc
deleted file mode 100644
index 96255ef8180..00000000000
--- a/chromium/v8/src/wasm/wasm-interpreter.cc
+++ /dev/null
@@ -1,4456 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <atomic>
-#include <type_traits>
-
-#include "src/wasm/wasm-interpreter.h"
-
-#include "src/base/overflowing-math.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/compiler/wasm-compiler.h"
-#include "src/numbers/conversions.h"
-#include "src/objects/objects-inl.h"
-#include "src/utils/boxed-float.h"
-#include "src/utils/identity-map.h"
-#include "src/utils/utils.h"
-#include "src/wasm/decoder.h"
-#include "src/wasm/function-body-decoder-impl.h"
-#include "src/wasm/function-body-decoder.h"
-#include "src/wasm/memory-tracing.h"
-#include "src/wasm/module-compiler.h"
-#include "src/wasm/wasm-arguments.h"
-#include "src/wasm/wasm-engine.h"
-#include "src/wasm/wasm-external-refs.h"
-#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
-#include "src/zone/accounting-allocator.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-using base::ReadLittleEndianValue;
-using base::ReadUnalignedValue;
-using base::WriteLittleEndianValue;
-using base::WriteUnalignedValue;
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
- } while (false)
-
-#if V8_TARGET_BIG_ENDIAN
-#define LANE(i, type) ((sizeof(type.val) / sizeof(type.val[0])) - (i)-1)
-#else
-#define LANE(i, type) (i)
-#endif
-
-#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)
-
-#define FOREACH_SIMPLE_BINOP(V) \
- V(I32Add, uint32_t, +) \
- V(I32Sub, uint32_t, -) \
- V(I32Mul, uint32_t, *) \
- V(I32And, uint32_t, &) \
- V(I32Ior, uint32_t, |) \
- V(I32Xor, uint32_t, ^) \
- V(I32Eq, uint32_t, ==) \
- V(I32Ne, uint32_t, !=) \
- V(I32LtU, uint32_t, <) \
- V(I32LeU, uint32_t, <=) \
- V(I32GtU, uint32_t, >) \
- V(I32GeU, uint32_t, >=) \
- V(I32LtS, int32_t, <) \
- V(I32LeS, int32_t, <=) \
- V(I32GtS, int32_t, >) \
- V(I32GeS, int32_t, >=) \
- V(I64Add, uint64_t, +) \
- V(I64Sub, uint64_t, -) \
- V(I64Mul, uint64_t, *) \
- V(I64And, uint64_t, &) \
- V(I64Ior, uint64_t, |) \
- V(I64Xor, uint64_t, ^) \
- V(I64Eq, uint64_t, ==) \
- V(I64Ne, uint64_t, !=) \
- V(I64LtU, uint64_t, <) \
- V(I64LeU, uint64_t, <=) \
- V(I64GtU, uint64_t, >) \
- V(I64GeU, uint64_t, >=) \
- V(I64LtS, int64_t, <) \
- V(I64LeS, int64_t, <=) \
- V(I64GtS, int64_t, >) \
- V(I64GeS, int64_t, >=) \
- V(F32Add, float, +) \
- V(F32Sub, float, -) \
- V(F32Eq, float, ==) \
- V(F32Ne, float, !=) \
- V(F32Lt, float, <) \
- V(F32Le, float, <=) \
- V(F32Gt, float, >) \
- V(F32Ge, float, >=) \
- V(F64Add, double, +) \
- V(F64Sub, double, -) \
- V(F64Eq, double, ==) \
- V(F64Ne, double, !=) \
- V(F64Lt, double, <) \
- V(F64Le, double, <=) \
- V(F64Gt, double, >) \
- V(F64Ge, double, >=) \
- V(F32Mul, float, *) \
- V(F64Mul, double, *) \
- V(F32Div, float, /) \
- V(F64Div, double, /)
-
-#define FOREACH_OTHER_BINOP(V) \
- V(I32DivS, int32_t) \
- V(I32DivU, uint32_t) \
- V(I32RemS, int32_t) \
- V(I32RemU, uint32_t) \
- V(I32Shl, uint32_t) \
- V(I32ShrU, uint32_t) \
- V(I32ShrS, int32_t) \
- V(I64DivS, int64_t) \
- V(I64DivU, uint64_t) \
- V(I64RemS, int64_t) \
- V(I64RemU, uint64_t) \
- V(I64Shl, uint64_t) \
- V(I64ShrU, uint64_t) \
- V(I64ShrS, int64_t) \
- V(I32Ror, int32_t) \
- V(I32Rol, int32_t) \
- V(I64Ror, int64_t) \
- V(I64Rol, int64_t) \
- V(F32Min, float) \
- V(F32Max, float) \
- V(F64Min, double) \
- V(F64Max, double) \
- V(I32AsmjsDivS, int32_t) \
- V(I32AsmjsDivU, uint32_t) \
- V(I32AsmjsRemS, int32_t) \
- V(I32AsmjsRemU, uint32_t) \
- V(F32CopySign, Float32) \
- V(F64CopySign, Float64)
-
-#define FOREACH_I32CONV_FLOATOP(V) \
- V(I32SConvertF32, int32_t, float) \
- V(I32SConvertF64, int32_t, double) \
- V(I32UConvertF32, uint32_t, float) \
- V(I32UConvertF64, uint32_t, double)
-
-#define FOREACH_OTHER_UNOP(V) \
- V(I32Clz, uint32_t) \
- V(I32Ctz, uint32_t) \
- V(I32Popcnt, uint32_t) \
- V(I32Eqz, uint32_t) \
- V(I64Clz, uint64_t) \
- V(I64Ctz, uint64_t) \
- V(I64Popcnt, uint64_t) \
- V(I64Eqz, uint64_t) \
- V(F32Abs, Float32) \
- V(F32Neg, Float32) \
- V(F32Ceil, float) \
- V(F32Floor, float) \
- V(F32Trunc, float) \
- V(F32NearestInt, float) \
- V(F64Abs, Float64) \
- V(F64Neg, Float64) \
- V(F64Ceil, double) \
- V(F64Floor, double) \
- V(F64Trunc, double) \
- V(F64NearestInt, double) \
- V(I32ConvertI64, int64_t) \
- V(I64SConvertF32, float) \
- V(I64SConvertF64, double) \
- V(I64UConvertF32, float) \
- V(I64UConvertF64, double) \
- V(I64SConvertI32, int32_t) \
- V(I64UConvertI32, uint32_t) \
- V(F32SConvertI32, int32_t) \
- V(F32UConvertI32, uint32_t) \
- V(F32SConvertI64, int64_t) \
- V(F32UConvertI64, uint64_t) \
- V(F32ConvertF64, double) \
- V(F32ReinterpretI32, int32_t) \
- V(F64SConvertI32, int32_t) \
- V(F64UConvertI32, uint32_t) \
- V(F64SConvertI64, int64_t) \
- V(F64UConvertI64, uint64_t) \
- V(F64ConvertF32, float) \
- V(F64ReinterpretI64, int64_t) \
- V(I32AsmjsSConvertF32, float) \
- V(I32AsmjsUConvertF32, float) \
- V(I32AsmjsSConvertF64, double) \
- V(I32AsmjsUConvertF64, double) \
- V(F32Sqrt, float) \
- V(F64Sqrt, double)
-
-namespace {
-
-constexpr uint32_t kFloat32SignBitMask = uint32_t{1} << 31;
-constexpr uint64_t kFloat64SignBitMask = uint64_t{1} << 63;
-
-inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
- *trap = kTrapDivUnrepresentable;
- return 0;
- }
- return a / b;
-}
-
-inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- return a / b;
-}
-
-inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- return a % b;
-}
-
-inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
- return a << (b & 0x1F);
-}
-
-inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b, TrapReason* trap) {
- return a >> (b & 0x1F);
-}
-
-inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
- return a >> (b & 0x1F);
-}
-
-inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
- *trap = kTrapDivUnrepresentable;
- return 0;
- }
- return a / b;
-}
-
-inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapDivByZero;
- return 0;
- }
- return a / b;
-}
-
-inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b, TrapReason* trap) {
- if (b == 0) {
- *trap = kTrapRemByZero;
- return 0;
- }
- return a % b;
-}
-
-inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
- return a << (b & 0x3F);
-}
-
-inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b, TrapReason* trap) {
- return a >> (b & 0x3F);
-}
-
-inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
- return a >> (b & 0x3F);
-}
-
-inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
- return (a >> (b & 0x1F)) | (a << ((32 - b) & 0x1F));
-}
-
-inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
- return (a << (b & 0x1F)) | (a >> ((32 - b) & 0x1F));
-}
-
-inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
- return (a >> (b & 0x3F)) | (a << ((64 - b) & 0x3F));
-}
-
-inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
- return (a << (b & 0x3F)) | (a >> ((64 - b) & 0x3F));
-}
-
-inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
- return JSMin(a, b);
-}
-
-inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
- return JSMax(a, b);
-}
-
-inline Float32 ExecuteF32CopySign(Float32 a, Float32 b, TrapReason* trap) {
- return Float32::FromBits((a.get_bits() & ~kFloat32SignBitMask) |
- (b.get_bits() & kFloat32SignBitMask));
-}
-
-inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
- return JSMin(a, b);
-}
-
-inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
- return JSMax(a, b);
-}
-
-inline Float64 ExecuteF64CopySign(Float64 a, Float64 b, TrapReason* trap) {
- return Float64::FromBits((a.get_bits() & ~kFloat64SignBitMask) |
- (b.get_bits() & kFloat64SignBitMask));
-}
-
-inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
- return std::numeric_limits<int32_t>::min();
- }
- return a / b;
-}
-
-inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- return a / b;
-}
-
-inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- if (b == -1) return 0;
- return a % b;
-}
-
-inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b, TrapReason* trap) {
- if (b == 0) return 0;
- return a % b;
-}
-
-inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
- return DoubleToInt32(a);
-}
-
-inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
- return DoubleToUint32(a);
-}
-
-inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
- return DoubleToInt32(a);
-}
-
-inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
- return DoubleToUint32(a);
-}
-
-int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros(val);
-}
-
-uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros(val);
-}
-
-uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
- return base::bits::CountPopulation(val);
-}
-
-inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
- return val == 0 ? 1 : 0;
-}
-
-int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
- return base::bits::CountLeadingZeros(val);
-}
-
-inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
- return base::bits::CountTrailingZeros(val);
-}
-
-inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
- return base::bits::CountPopulation(val);
-}
-
-inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
- return val == 0 ? 1 : 0;
-}
-
-inline Float32 ExecuteF32Abs(Float32 a, TrapReason* trap) {
- return Float32::FromBits(a.get_bits() & ~kFloat32SignBitMask);
-}
-
-inline Float32 ExecuteF32Neg(Float32 a, TrapReason* trap) {
- return Float32::FromBits(a.get_bits() ^ kFloat32SignBitMask);
-}
-
-inline float ExecuteF32Ceil(float a, TrapReason* trap) { return ceilf(a); }
-
-inline float ExecuteF32Floor(float a, TrapReason* trap) { return floorf(a); }
-
-inline float ExecuteF32Trunc(float a, TrapReason* trap) { return truncf(a); }
-
-inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
- return nearbyintf(a);
-}
-
-inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
- float result = sqrtf(a);
- return result;
-}
-
-inline Float64 ExecuteF64Abs(Float64 a, TrapReason* trap) {
- return Float64::FromBits(a.get_bits() & ~kFloat64SignBitMask);
-}
-
-inline Float64 ExecuteF64Neg(Float64 a, TrapReason* trap) {
- return Float64::FromBits(a.get_bits() ^ kFloat64SignBitMask);
-}
-
-inline double ExecuteF64Ceil(double a, TrapReason* trap) { return ceil(a); }
-
-inline double ExecuteF64Floor(double a, TrapReason* trap) { return floor(a); }
-
-inline double ExecuteF64Trunc(double a, TrapReason* trap) { return trunc(a); }
-
-inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
- return nearbyint(a);
-}
-
-inline double ExecuteF64Sqrt(double a, TrapReason* trap) { return sqrt(a); }
-
-template <typename int_type, typename float_type>
-int_type ExecuteConvert(float_type a, TrapReason* trap) {
- if (is_inbounds<int_type>(a)) {
- return static_cast<int_type>(a);
- }
- *trap = kTrapFloatUnrepresentable;
- return 0;
-}
-
-template <typename int_type, typename float_type>
-int_type ExecuteConvertSaturate(float_type a) {
- TrapReason base_trap = kTrapCount;
- int32_t val = ExecuteConvert<int_type>(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < static_cast<float_type>(0.0)
- ? std::numeric_limits<int_type>::min()
- : std::numeric_limits<int_type>::max());
-}
-
-template <typename dst_type, typename src_type, void (*fn)(Address)>
-inline dst_type CallExternalIntToFloatFunction(src_type input) {
- uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
- Address data_addr = reinterpret_cast<Address>(data);
- WriteUnalignedValue<src_type>(data_addr, input);
- fn(data_addr);
- return ReadUnalignedValue<dst_type>(data_addr);
-}
-
-template <typename dst_type, typename src_type, int32_t (*fn)(Address)>
-inline dst_type CallExternalFloatToIntFunction(src_type input,
- TrapReason* trap) {
- uint8_t data[std::max(sizeof(dst_type), sizeof(src_type))];
- Address data_addr = reinterpret_cast<Address>(data);
- WriteUnalignedValue<src_type>(data_addr, input);
- if (!fn(data_addr)) *trap = kTrapFloatUnrepresentable;
- return ReadUnalignedValue<dst_type>(data_addr);
-}
-
-inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<uint32_t>(a & 0xFFFFFFFF);
-}
-
-int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, float,
- float32_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
-}
-
-int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<int64_t, double,
- float64_to_int64_wrapper>(a, trap);
-}
-
-int64_t ExecuteI64SConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64SConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<int64_t>::min()
- : std::numeric_limits<int64_t>::max());
-}
-
-uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, float,
- float32_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF32(float a) {
- TrapReason base_trap = kTrapCount;
- uint64_t val = ExecuteI64UConvertF32(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
-}
-
-uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
- return CallExternalFloatToIntFunction<uint64_t, double,
- float64_to_uint64_wrapper>(a, trap);
-}
-
-uint64_t ExecuteI64UConvertSatF64(double a) {
- TrapReason base_trap = kTrapCount;
- int64_t val = ExecuteI64UConvertF64(a, &base_trap);
- if (base_trap == kTrapCount) {
- return val;
- }
- return std::isnan(a) ? 0
- : (a < 0.0 ? std::numeric_limits<uint64_t>::min()
- : std::numeric_limits<uint64_t>::max());
-}
-
-inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<int64_t>(a);
-}
-
-inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<uint64_t>(a);
-}
-
-inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<float>(a);
-}
-
-inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
- return CallExternalIntToFloatFunction<float, uint64_t,
- uint64_to_float32_wrapper>(a);
-}
-
-inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
- return DoubleToFloat32(a);
-}
-
-inline Float32 ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
- return Float32::FromBits(a);
-}
-
-inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
- return CallExternalIntToFloatFunction<double, uint64_t,
- uint64_to_float64_wrapper>(a);
-}
-
-inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
- return static_cast<double>(a);
-}
-
-inline Float64 ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
- return Float64::FromBits(a);
-}
-
-inline int32_t ExecuteI32ReinterpretF32(WasmValue a) {
- return a.to_f32_boxed().get_bits();
-}
-
-inline int64_t ExecuteI64ReinterpretF64(WasmValue a) {
- return a.to_f64_boxed().get_bits();
-}
-
-enum InternalOpcode {
-#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
- FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
-#undef DECL_INTERNAL_ENUM
-};
-
-const char* OpcodeName(uint32_t val) {
- switch (val) {
-#define DECL_INTERNAL_CASE(name, value) \
- case kInternal##name: \
- return "Internal" #name;
- FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
-#undef DECL_INTERNAL_CASE
- }
- return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
-}
-
-constexpr int32_t kCatchInArity = 1;
-
-} // namespace
-
-class SideTable;
-
-// Code and metadata needed to execute a function.
-struct InterpreterCode {
- const WasmFunction* function; // wasm function
- BodyLocalDecls locals; // local declarations
- const byte* orig_start; // start of original code
- const byte* orig_end; // end of original code
- byte* start; // start of (maybe altered) code
- byte* end; // end of (maybe altered) code
- SideTable* side_table; // precomputed side table for control flow.
-
- const byte* at(pc_t pc) { return start + pc; }
-};
-
-// A helper class to compute the control transfers for each bytecode offset.
-// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
-// be directly executed without the need to dynamically track blocks.
-class SideTable : public ZoneObject {
- public:
- ControlTransferMap map_;
- int32_t max_stack_height_ = 0;
-
- SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
- : map_(zone) {
- // Create a zone for all temporary objects.
- Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
-
- // Represents a control flow label.
- class CLabel : public ZoneObject {
- explicit CLabel(Zone* zone, int32_t target_stack_height, uint32_t arity)
- : target_stack_height(target_stack_height),
- arity(arity),
- refs(zone) {}
-
- public:
- struct Ref {
- const byte* from_pc;
- const int32_t stack_height;
- };
- const byte* target = nullptr;
- int32_t target_stack_height;
- // Arity when branching to this label.
- const uint32_t arity;
- ZoneVector<Ref> refs;
-
- static CLabel* New(Zone* zone, int32_t stack_height, uint32_t arity) {
- return new (zone) CLabel(zone, stack_height, arity);
- }
-
- // Bind this label to the given PC.
- void Bind(const byte* pc) {
- DCHECK_NULL(target);
- target = pc;
- }
-
- // Reference this label from the given location.
- void Ref(const byte* from_pc, int32_t stack_height) {
- // Target being bound before a reference means this is a loop.
- DCHECK_IMPLIES(target, *target == kExprLoop);
- refs.push_back({from_pc, stack_height});
- }
-
- void Finish(ControlTransferMap* map, const byte* start) {
- DCHECK_NOT_NULL(target);
- for (auto ref : refs) {
- size_t offset = static_cast<size_t>(ref.from_pc - start);
- auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
- DCHECK_GE(ref.stack_height, target_stack_height);
- spdiff_t spdiff =
- static_cast<spdiff_t>(ref.stack_height - target_stack_height);
- TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
- pcdiff, ref.stack_height, target_stack_height, spdiff);
- ControlTransferEntry& entry = (*map)[offset];
- entry.pc_diff = pcdiff;
- entry.sp_diff = spdiff;
- entry.target_arity = arity;
- }
- }
- };
-
- // An entry in the control stack.
- struct Control {
- const byte* pc;
- CLabel* end_label;
- CLabel* else_label;
- // Arity (number of values on the stack) when exiting this control
- // structure via |end|.
- uint32_t exit_arity;
- // Track whether this block was already left, i.e. all further
- // instructions are unreachable.
- bool unreachable = false;
-
- Control(const byte* pc, CLabel* end_label, CLabel* else_label,
- uint32_t exit_arity)
- : pc(pc),
- end_label(end_label),
- else_label(else_label),
- exit_arity(exit_arity) {}
- Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
- : Control(pc, end_label, nullptr, exit_arity) {}
-
- void Finish(ControlTransferMap* map, const byte* start) {
- end_label->Finish(map, start);
- if (else_label) else_label->Finish(map, start);
- }
- };
-
- // Compute the ControlTransfer map.
- // This algorithm maintains a stack of control constructs similar to the
- // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
- // bytecodes with their target, as well as determining whether the current
- // bytecodes are within the true or false block of an else.
- ZoneVector<Control> control_stack(&control_transfer_zone);
- // It also maintains a stack of all nested {try} blocks to resolve local
- // handler targets for potentially throwing operations. These exceptional
- // control transfers are treated just like other branches in the resulting
- // map. This stack contains indices into the above control stack.
- ZoneVector<size_t> exception_stack(zone);
- int32_t stack_height = 0;
- uint32_t func_arity =
- static_cast<uint32_t>(code->function->sig->return_count());
- CLabel* func_label =
- CLabel::New(&control_transfer_zone, stack_height, func_arity);
- control_stack.emplace_back(code->orig_start, func_label, func_arity);
- auto control_parent = [&]() -> Control& {
- DCHECK_LE(2, control_stack.size());
- return control_stack[control_stack.size() - 2];
- };
- auto copy_unreachable = [&] {
- control_stack.back().unreachable = control_parent().unreachable;
- };
- for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
- i.has_next(); i.next()) {
- WasmOpcode opcode = i.current();
- int32_t exceptional_stack_height = 0;
- if (WasmOpcodes::IsPrefixOpcode(opcode)) opcode = i.prefixed_opcode();
- bool unreachable = control_stack.back().unreachable;
- if (unreachable) {
- TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode));
- } else {
- auto stack_effect =
- StackEffect(module, code->function->sig, i.pc(), i.end());
- TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
- stack_effect.second);
- DCHECK_GE(stack_height, stack_effect.first);
- DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
- stack_effect.first + stack_effect.second);
- exceptional_stack_height = stack_height - stack_effect.first;
- stack_height = stack_height - stack_effect.first + stack_effect.second;
- if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
- }
- if (!exception_stack.empty() && WasmOpcodes::IsThrowingOpcode(opcode)) {
- // Record exceptional control flow from potentially throwing opcodes to
- // the local handler if one is present. The stack height at the throw
- // point is assumed to have popped all operands and not pushed any yet.
- DCHECK_GE(control_stack.size() - 1, exception_stack.back());
- const Control* c = &control_stack[exception_stack.back()];
- if (!unreachable) c->else_label->Ref(i.pc(), exceptional_stack_height);
- if (exceptional_stack_height + kCatchInArity > max_stack_height_) {
- max_stack_height_ = exceptional_stack_height + kCatchInArity;
- }
- TRACE("handler @%u: %s -> try @%u\n", i.pc_offset(), OpcodeName(opcode),
- static_cast<uint32_t>(c->pc - code->start));
- }
- switch (opcode) {
- case kExprBlock:
- case kExprLoop: {
- bool is_loop = opcode == kExprLoop;
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: %s, arity %d->%d\n", i.pc_offset(),
- is_loop ? "Loop" : "Block", imm.in_arity(), imm.out_arity());
- CLabel* label =
- CLabel::New(&control_transfer_zone, stack_height - imm.in_arity(),
- is_loop ? imm.in_arity() : imm.out_arity());
- control_stack.emplace_back(i.pc(), label, imm.out_arity());
- copy_unreachable();
- if (is_loop) label->Bind(i.pc());
- break;
- }
- case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: If, arity %d->%d\n", i.pc_offset(),
- imm.in_arity(), imm.out_arity());
- CLabel* end_label =
- CLabel::New(&control_transfer_zone, stack_height - imm.in_arity(),
- imm.out_arity());
- CLabel* else_label =
- CLabel::New(&control_transfer_zone, stack_height, 0);
- control_stack.emplace_back(i.pc(), end_label, else_label,
- imm.out_arity());
- copy_unreachable();
- if (!unreachable) else_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprElse: {
- Control* c = &control_stack.back();
- copy_unreachable();
- TRACE("control @%u: Else\n", i.pc_offset());
- if (!control_parent().unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
- DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(i.pc() + 1);
- c->else_label->Finish(&map_, code->orig_start);
- stack_height = c->else_label->target_stack_height;
- c->else_label = nullptr;
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- break;
- }
- case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(), &i,
- i.pc());
- if (imm.type == kWasmBottom) {
- imm.sig = module->signature(imm.sig_index);
- }
- TRACE("control @%u: Try, arity %d->%d\n", i.pc_offset(),
- imm.in_arity(), imm.out_arity());
- CLabel* end_label = CLabel::New(&control_transfer_zone, stack_height,
- imm.out_arity());
- CLabel* catch_label =
- CLabel::New(&control_transfer_zone, stack_height, kCatchInArity);
- control_stack.emplace_back(i.pc(), end_label, catch_label,
- imm.out_arity());
- exception_stack.push_back(control_stack.size() - 1);
- copy_unreachable();
- break;
- }
- case kExprCatch: {
- DCHECK_EQ(control_stack.size() - 1, exception_stack.back());
- Control* c = &control_stack.back();
- exception_stack.pop_back();
- copy_unreachable();
- TRACE("control @%u: Catch\n", i.pc_offset());
- if (!control_parent().unreachable) {
- c->end_label->Ref(i.pc(), stack_height);
- }
- DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(i.pc() + 1);
- c->else_label->Finish(&map_, code->orig_start);
- c->else_label = nullptr;
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- stack_height = c->end_label->target_stack_height + kCatchInArity;
- break;
- }
- case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- uint32_t depth = imm.depth.depth; // Extracted for convenience.
- imm.index.exception = &module->exceptions[imm.index.index];
- DCHECK_EQ(0, imm.index.exception->sig->return_count());
- size_t params = imm.index.exception->sig->parameter_count();
- // Taken branches pop the exception and push the encoded values.
- int32_t height = stack_height - 1 + static_cast<int32_t>(params);
- TRACE("control @%u: BrOnExn[depth=%u]\n", i.pc_offset(), depth);
- Control* c = &control_stack[control_stack.size() - depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), height);
- break;
- }
- case kExprEnd: {
- Control* c = &control_stack.back();
- TRACE("control @%u: End\n", i.pc_offset());
- // Only loops have bound labels.
- DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
- if (!c->end_label->target) {
- if (c->else_label) c->else_label->Bind(i.pc());
- c->end_label->Bind(i.pc() + 1);
- }
- c->Finish(&map_, code->orig_start);
- DCHECK_IMPLIES(!unreachable,
- stack_height >= c->end_label->target_stack_height);
- stack_height = c->end_label->target_stack_height + c->exit_arity;
- control_stack.pop_back();
- break;
- }
- case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), imm.depth);
- Control* c = &control_stack[control_stack.size() - imm.depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), imm.depth);
- Control* c = &control_stack[control_stack.size() - imm.depth - 1];
- if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
- break;
- }
- case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&i, i.pc());
- BranchTableIterator<Decoder::kNoValidate> iterator(&i, imm);
- TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
- imm.table_count);
- if (!unreachable) {
- while (iterator.has_next()) {
- uint32_t j = iterator.cur_index();
- uint32_t target = iterator.next();
- Control* c = &control_stack[control_stack.size() - target - 1];
- c->end_label->Ref(i.pc() + j, stack_height);
- }
- }
- break;
- }
- default:
- break;
- }
- if (WasmOpcodes::IsUnconditionalJump(opcode)) {
- control_stack.back().unreachable = true;
- }
- }
- DCHECK_EQ(0, control_stack.size());
- DCHECK_EQ(func_arity, stack_height);
- }
-
- bool HasEntryAt(pc_t from) {
- auto result = map_.find(from);
- return result != map_.end();
- }
-
- ControlTransferEntry& Lookup(pc_t from) {
- auto result = map_.find(from);
- DCHECK(result != map_.end());
- return result->second;
- }
-};
-
-// The main storage for interpreter code. It maps {WasmFunction} to the
-// metadata needed to execute each function.
-class CodeMap {
- Zone* zone_;
- const WasmModule* module_;
- ZoneVector<InterpreterCode> interpreter_code_;
-
- public:
- CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
- : zone_(zone), module_(module), interpreter_code_(zone) {
- if (module == nullptr) return;
- interpreter_code_.reserve(module->functions.size());
- for (const WasmFunction& function : module->functions) {
- if (function.imported) {
- DCHECK(!function.code.is_set());
- AddFunction(&function, nullptr, nullptr);
- } else {
- AddFunction(&function, module_start + function.code.offset(),
- module_start + function.code.end_offset());
- }
- }
- }
-
- const WasmModule* module() const { return module_; }
-
- InterpreterCode* GetCode(const WasmFunction* function) {
- InterpreterCode* code = GetCode(function->func_index);
- DCHECK_EQ(function, code->function);
- return code;
- }
-
- InterpreterCode* GetCode(uint32_t function_index) {
- DCHECK_LT(function_index, interpreter_code_.size());
- return Preprocess(&interpreter_code_[function_index]);
- }
-
- InterpreterCode* Preprocess(InterpreterCode* code) {
- DCHECK_EQ(code->function->imported, code->start == nullptr);
- if (!code->side_table && code->start) {
- // Compute the control targets map and the local declarations.
- code->side_table = new (zone_) SideTable(zone_, module_, code);
- }
- return code;
- }
-
- void AddFunction(const WasmFunction* function, const byte* code_start,
- const byte* code_end) {
- InterpreterCode code = {
- function, BodyLocalDecls(zone_), code_start,
- code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
- nullptr};
-
- DCHECK_EQ(interpreter_code_.size(), function->func_index);
- interpreter_code_.push_back(code);
- }
-
- void SetFunctionCode(const WasmFunction* function, const byte* start,
- const byte* end) {
- DCHECK_LT(function->func_index, interpreter_code_.size());
- InterpreterCode* code = &interpreter_code_[function->func_index];
- DCHECK_EQ(function, code->function);
- code->orig_start = start;
- code->orig_end = end;
- code->start = const_cast<byte*>(start);
- code->end = const_cast<byte*>(end);
- code->side_table = nullptr;
- Preprocess(code);
- }
-};
-
-namespace {
-
-struct ExternalCallResult {
- enum Type {
- // The function should be executed inside this interpreter.
- INTERNAL,
- // For indirect calls: Table or function does not exist.
- INVALID_FUNC,
- // For indirect calls: Signature does not match expected signature.
- SIGNATURE_MISMATCH,
- // The function was executed and returned normally.
- EXTERNAL_RETURNED,
- // The function was executed, threw an exception, and the stack was unwound.
- EXTERNAL_UNWOUND,
- // The function was executed and threw an exception that was locally caught.
- EXTERNAL_CAUGHT
- };
- Type type;
- // If type is INTERNAL, this field holds the function to call internally.
- InterpreterCode* interpreter_code;
-
- ExternalCallResult(Type type) : type(type) { // NOLINT
- DCHECK_NE(INTERNAL, type);
- }
- ExternalCallResult(Type type, InterpreterCode* code)
- : type(type), interpreter_code(code) {
- DCHECK_EQ(INTERNAL, type);
- }
-};
-
-// Like a static_cast from src to dst, but specialized for boxed floats.
-template <typename dst, typename src>
-struct converter {
- dst operator()(src val) const { return static_cast<dst>(val); }
-};
-template <>
-struct converter<Float64, uint64_t> {
- Float64 operator()(uint64_t val) const { return Float64::FromBits(val); }
-};
-template <>
-struct converter<Float32, uint32_t> {
- Float32 operator()(uint32_t val) const { return Float32::FromBits(val); }
-};
-template <>
-struct converter<uint64_t, Float64> {
- uint64_t operator()(Float64 val) const { return val.get_bits(); }
-};
-template <>
-struct converter<uint32_t, Float32> {
- uint32_t operator()(Float32 val) const { return val.get_bits(); }
-};
-
-template <typename T>
-V8_INLINE bool has_nondeterminism(T val) {
- static_assert(!std::is_floating_point<T>::value, "missing specialization");
- return false;
-}
-template <>
-V8_INLINE bool has_nondeterminism<float>(float val) {
- return std::isnan(val);
-}
-template <>
-V8_INLINE bool has_nondeterminism<double>(double val) {
- return std::isnan(val);
-}
-
-} // namespace
-
-// Responsible for executing code directly.
-class ThreadImpl {
- struct Activation {
- uint32_t fp;
- sp_t sp;
- Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
- };
-
- public:
- // The {ReferenceStackScope} sets up the reference stack in the interpreter.
- // The handle to the reference stack has to be re-initialized every time we
- // call into the interpreter because there is no HandleScope that could
- // contain that handle. A global handle is not an option because it can lead
- // to a memory leak if a reference to the {WasmInstanceObject} is put onto the
- // reference stack and thereby transitively keeps the interpreter alive.
- class ReferenceStackScope {
- public:
- explicit ReferenceStackScope(ThreadImpl* impl) : impl_(impl) {
- // The reference stack is already initialized, we don't have to do
- // anything.
- if (!impl_->reference_stack_cell_.is_null()) return;
- impl_->reference_stack_cell_ = handle(
- impl_->instance_object_->debug_info().interpreter_reference_stack(),
- impl_->isolate_);
- // We initialized the reference stack, so we also have to reset it later.
- do_reset_stack_ = true;
- }
-
- ~ReferenceStackScope() {
- if (do_reset_stack_) {
- impl_->reference_stack_cell_ = Handle<Cell>();
- }
- }
-
- private:
- ThreadImpl* impl_;
- bool do_reset_stack_ = false;
- };
-
- ThreadImpl(Zone* zone, CodeMap* codemap,
- Handle<WasmInstanceObject> instance_object)
- : codemap_(codemap),
- isolate_(instance_object->GetIsolate()),
- instance_object_(instance_object),
- frames_(zone),
- activations_(zone) {}
-
- //==========================================================================
- // Implementation of public interface for WasmInterpreter::Thread.
- //==========================================================================
-
- WasmInterpreter::State state() { return state_; }
-
- void InitFrame(const WasmFunction* function, WasmValue* args) {
- DCHECK_EQ(current_activation().fp, frames_.size());
- InterpreterCode* code = codemap()->GetCode(function);
- size_t num_params = function->sig->parameter_count();
- EnsureStackSpace(num_params);
- Push(args, num_params);
- PushFrame(code);
- }
-
- WasmInterpreter::State Run(int num_steps = -1) {
- DCHECK(state_ == WasmInterpreter::STOPPED ||
- state_ == WasmInterpreter::PAUSED);
- DCHECK(num_steps == -1 || num_steps > 0);
- if (num_steps == -1) {
- TRACE(" => Run()\n");
- } else if (num_steps == 1) {
- TRACE(" => Step()\n");
- } else {
- TRACE(" => Run(%d)\n", num_steps);
- }
- state_ = WasmInterpreter::RUNNING;
- Execute(frames_.back().code, frames_.back().pc, num_steps);
- // If state_ is STOPPED, the current activation must be fully unwound.
- DCHECK_IMPLIES(state_ == WasmInterpreter::STOPPED,
- current_activation().fp == frames_.size());
- return state_;
- }
-
- void Pause() { UNIMPLEMENTED(); }
-
- void Reset() {
- TRACE("----- RESET -----\n");
- ResetStack(0);
- frames_.clear();
- state_ = WasmInterpreter::STOPPED;
- trap_reason_ = kTrapCount;
- possible_nondeterminism_ = false;
- }
-
- int GetFrameCount() {
- DCHECK_GE(kMaxInt, frames_.size());
- return static_cast<int>(frames_.size());
- }
-
- WasmValue GetReturnValue(uint32_t index) {
- if (state_ == WasmInterpreter::TRAPPED) return WasmValue(0xDEADBEEF);
- DCHECK_EQ(WasmInterpreter::FINISHED, state_);
- Activation act = current_activation();
- // Current activation must be finished.
- DCHECK_EQ(act.fp, frames_.size());
- return GetStackValue(act.sp + index);
- }
-
- WasmValue GetStackValue(sp_t index) {
- DCHECK_GT(StackHeight(), index);
- return stack_[index].ExtractValue(this, index);
- }
-
- void SetStackValue(sp_t index, WasmValue value) {
- DCHECK_GT(StackHeight(), index);
- stack_[index] = StackValue(value, this, index);
- }
-
- TrapReason GetTrapReason() { return trap_reason_; }
-
- pc_t GetBreakpointPc() { return break_pc_; }
-
- bool PossibleNondeterminism() { return possible_nondeterminism_; }
-
- uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }
-
- void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
-
- void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
-
- Handle<Cell> reference_stack_cell() const { return reference_stack_cell_; }
-
- uint32_t NumActivations() {
- return static_cast<uint32_t>(activations_.size());
- }
-
- uint32_t StartActivation() {
- TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
- // If you use activations, use them consistently:
- DCHECK_IMPLIES(activations_.empty(), frames_.empty());
- DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
- uint32_t activation_id = static_cast<uint32_t>(activations_.size());
- activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
- StackHeight());
- state_ = WasmInterpreter::STOPPED;
- return activation_id;
- }
-
- void FinishActivation(uint32_t id) {
- TRACE("----- FINISH ACTIVATION %zu -----\n", activations_.size() - 1);
- DCHECK_LT(0, activations_.size());
- DCHECK_EQ(activations_.size() - 1, id);
- // Stack height must match the start of this activation (otherwise unwind
- // first).
- DCHECK_EQ(activations_.back().fp, frames_.size());
- DCHECK_LE(activations_.back().sp, StackHeight());
- ResetStack(activations_.back().sp);
- activations_.pop_back();
- }
-
- uint32_t ActivationFrameBase(uint32_t id) {
- DCHECK_GT(activations_.size(), id);
- return activations_[id].fp;
- }
-
- WasmInterpreter::Thread::ExceptionHandlingResult RaiseException(
- Isolate* isolate, Handle<Object> exception) {
- DCHECK_EQ(WasmInterpreter::TRAPPED, state_);
- isolate->Throw(*exception); // Will check that none is pending.
- if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
- DCHECK_EQ(WasmInterpreter::STOPPED, state_);
- return WasmInterpreter::Thread::UNWOUND;
- }
- state_ = WasmInterpreter::PAUSED;
- return WasmInterpreter::Thread::HANDLED;
- }
-
- private:
- // Handle a thrown exception. Returns whether the exception was handled inside
- // the current activation. Unwinds the interpreted stack accordingly.
- WasmInterpreter::Thread::ExceptionHandlingResult HandleException(
- Isolate* isolate) {
- DCHECK(isolate->has_pending_exception());
- bool catchable =
- isolate->is_catchable_by_wasm(isolate->pending_exception());
- DCHECK_LT(0, activations_.size());
- Activation& act = activations_.back();
- while (frames_.size() > act.fp) {
- Frame& frame = frames_.back();
- InterpreterCode* code = frame.code;
- if (catchable && code->side_table->HasEntryAt(frame.pc)) {
- TRACE("----- HANDLE -----\n");
- Push(WasmValue(handle(isolate->pending_exception(), isolate)));
- isolate->clear_pending_exception();
- frame.pc += JumpToHandlerDelta(code, frame.pc);
- TRACE(" => handler #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frame.pc);
- return WasmInterpreter::Thread::HANDLED;
- }
- TRACE(" => drop frame #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frame.pc);
- ResetStack(frame.sp);
- frames_.pop_back();
- }
- TRACE("----- UNWIND -----\n");
- DCHECK_EQ(act.fp, frames_.size());
- DCHECK_EQ(act.sp, StackHeight());
- state_ = WasmInterpreter::STOPPED;
- return WasmInterpreter::Thread::UNWOUND;
- }
-
- // Entries on the stack of functions being evaluated.
- struct Frame {
- InterpreterCode* code;
- pc_t pc;
- sp_t sp;
-
- // Limit of parameters.
- sp_t plimit() { return sp + code->function->sig->parameter_count(); }
- // Limit of locals.
- sp_t llimit() { return plimit() + code->locals.type_list.size(); }
- };
-
- // Safety wrapper for values on the operand stack represented as {WasmValue}.
- // Most values are stored directly on the stack, only reference values are
- // kept in a separate on-heap reference stack to make the GC trace them.
- // TODO(wasm): Optimize simple stack operations (like "get_local",
- // "set_local", and "tee_local") so that they don't require a handle scope.
- // TODO(wasm): Consider optimizing activations that use no reference
- // values to avoid allocating the reference stack entirely.
- class StackValue {
- public:
- StackValue() = default; // Only needed for resizing the stack.
- StackValue(WasmValue v, ThreadImpl* thread, sp_t index) : value_(v) {
- if (IsReferenceValue()) {
- value_ = WasmValue(Handle<Object>::null());
- int ref_index = static_cast<int>(index);
- thread->reference_stack().set(ref_index, *v.to_anyref());
- }
- }
-
- WasmValue ExtractValue(ThreadImpl* thread, sp_t index) {
- if (!IsReferenceValue()) return value_;
- DCHECK(value_.to_anyref().is_null());
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- Handle<Object> ref(thread->reference_stack().get(ref_index), isolate);
- DCHECK(!ref->IsTheHole(isolate));
- return WasmValue(ref);
- }
-
- bool IsReferenceValue() const { return value_.type() == kWasmAnyRef; }
-
- void ClearValue(ThreadImpl* thread, sp_t index) {
- if (!IsReferenceValue()) return;
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- thread->reference_stack().set_the_hole(isolate, ref_index);
- }
-
- static void ClearValues(ThreadImpl* thread, sp_t index, int count) {
- int ref_index = static_cast<int>(index);
- thread->reference_stack().FillWithHoles(ref_index, ref_index + count);
- }
-
- static bool IsClearedValue(ThreadImpl* thread, sp_t index) {
- int ref_index = static_cast<int>(index);
- Isolate* isolate = thread->isolate_;
- return thread->reference_stack().is_the_hole(isolate, ref_index);
- }
-
- private:
- WasmValue value_;
- };
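
The split between the off-heap value stack and the on-heap reference stack (see the comment preceding the StackValue class) can be pictured with a small standalone sketch. All names below are hypothetical and not V8 API: plain values stay in an ordinary array, while reference slots hold a placeholder and the actual object lives in a parallel, GC-visible array at the same index.

#include <cassert>
#include <cstddef>
#include <vector>

// Minimal illustration of the two-stack idea; void* stands in for a traced
// object pointer, which in the real interpreter lives in a FixedArray.
class MiniStack {
 public:
  void Push(long v) {
    slots_.push_back({v, false});
    refs_.push_back(nullptr);
  }
  void PushRef(void* obj) {
    slots_.push_back({0, true});  // placeholder on the value stack
    refs_.push_back(obj);         // real pointer on the reference stack
  }
  void* RefAt(size_t i) const {
    assert(slots_[i].is_ref);
    return refs_[i];
  }

 private:
  struct Slot { long bits; bool is_ref; };
  std::vector<Slot> slots_;   // value stack (off-heap in the real code)
  std::vector<void*> refs_;   // reference stack (on-heap in the real code)
};
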
-
- friend class InterpretedFrameImpl;
- friend class ReferenceStackScope;
-
- CodeMap* codemap_;
- Isolate* isolate_;
- Handle<WasmInstanceObject> instance_object_;
- std::unique_ptr<StackValue[]> stack_;
- StackValue* stack_limit_ = nullptr; // End of allocated stack space.
- StackValue* sp_ = nullptr; // Current stack pointer.
- // The reference stack is pointed to by a {Cell} to be able to replace the
- // underlying {FixedArray} when growing the stack. This avoids having to
- // recreate or update the global handle keeping this object alive.
- Handle<Cell> reference_stack_cell_; // References are on an on-heap stack.
- ZoneVector<Frame> frames_;
- WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
- pc_t break_pc_ = kInvalidPc;
- TrapReason trap_reason_ = kTrapCount;
- bool possible_nondeterminism_ = false;
- uint8_t break_flags_ = 0; // a combination of WasmInterpreter::BreakFlag
- uint64_t num_interpreted_calls_ = 0;
- // Store the stack height of each activation (for unwind and frame
- // inspection).
- ZoneVector<Activation> activations_;
-
- CodeMap* codemap() const { return codemap_; }
- const WasmModule* module() const { return codemap_->module(); }
- FixedArray reference_stack() const {
- return FixedArray::cast(reference_stack_cell_->value());
- }
-
- void DoTrap(TrapReason trap, pc_t pc) {
- TRACE("TRAP: %s\n", WasmOpcodes::TrapReasonMessage(trap));
- state_ = WasmInterpreter::TRAPPED;
- trap_reason_ = trap;
- CommitPc(pc);
- }
-
- // Check if there is room for a function's activation.
- void EnsureStackSpaceForCall(InterpreterCode* code) {
- EnsureStackSpace(code->side_table->max_stack_height_ +
- code->locals.type_list.size());
- DCHECK_GE(StackHeight(), code->function->sig->parameter_count());
- }
-
- // Push a frame with arguments already on the stack.
- void PushFrame(InterpreterCode* code) {
- DCHECK_NOT_NULL(code);
- DCHECK_NOT_NULL(code->side_table);
- EnsureStackSpaceForCall(code);
-
- ++num_interpreted_calls_;
- size_t arity = code->function->sig->parameter_count();
- // The parameters will overlap the arguments already on the stack.
- DCHECK_GE(StackHeight(), arity);
-
- frames_.push_back({code, 0, StackHeight() - arity});
- frames_.back().pc = InitLocals(code);
- TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
- code->function->func_index, frames_.back().pc);
- }
-
- pc_t InitLocals(InterpreterCode* code) {
- for (ValueType p : code->locals.type_list) {
- WasmValue val;
- switch (p.kind()) {
-#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: \
- val = WasmValue(ctype{}); \
- break;
- FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
-#undef CASE_TYPE
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- val = WasmValue(isolate_->factory()->null_value());
- break;
- }
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- break;
- }
- Push(val);
- }
- return code->locals.encoded_size;
- }
-
- void CommitPc(pc_t pc) {
- DCHECK(!frames_.empty());
- frames_.back().pc = pc;
- }
-
- bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
- if (pc == break_pc_) {
- // Skip the previously hit breakpoint when resuming.
- break_pc_ = kInvalidPc;
- return true;
- }
- return false;
- }
-
- void ReloadFromFrameOnException(Decoder* decoder, InterpreterCode** code,
- pc_t* pc, pc_t* limit) {
- Frame* top = &frames_.back();
- *code = top->code;
- *pc = top->pc;
- *limit = top->code->end - top->code->start;
- decoder->Reset(top->code->start, top->code->end);
- }
-
- int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
- return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
- }
-
- int JumpToHandlerDelta(InterpreterCode* code, pc_t pc) {
- ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(control_transfer_entry.sp_diff + kCatchInArity,
- control_transfer_entry.target_arity);
- return control_transfer_entry.pc_diff;
- }
-
- int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
- ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
- DoStackTransfer(control_transfer_entry.sp_diff,
- control_transfer_entry.target_arity);
- return control_transfer_entry.pc_diff;
- }
-
- pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
- switch (code->orig_start[pc]) {
- case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- return pc + 1 + imm.length;
- }
- case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- decoder, code->at(pc));
- return pc + 1 + imm.length;
- }
- default:
- UNREACHABLE();
- }
- }
-
- bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
- size_t arity) {
- DCHECK_GT(frames_.size(), 0);
- spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - frames_.back().sp);
- frames_.pop_back();
- if (frames_.size() == current_activation().fp) {
- // A return from the last frame terminates the execution.
- state_ = WasmInterpreter::FINISHED;
- DoStackTransfer(sp_diff, arity);
- TRACE(" => finish\n");
- return false;
- } else {
- // Return to caller frame.
- Frame* top = &frames_.back();
- *code = top->code;
- decoder->Reset((*code)->start, (*code)->end);
- *pc = ReturnPc(decoder, *code, top->pc);
- *limit = top->code->end - top->code->start;
- TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
- (*code)->function->func_index, *pc);
- DoStackTransfer(sp_diff, arity);
- return true;
- }
- }
-
- // Returns true if the call was successful, false if the stack check failed
- // and the current activation was fully unwound.
- bool DoCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
- pc_t* limit) V8_WARN_UNUSED_RESULT {
- frames_.back().pc = *pc;
- PushFrame(target);
- if (!DoStackCheck()) return false;
- *pc = frames_.back().pc;
- *limit = target->end - target->start;
- decoder->Reset(target->start, target->end);
- return true;
- }
-
- // Returns true if the tail call was successful, false if the stack check
- // failed.
- bool DoReturnCall(Decoder* decoder, InterpreterCode* target, pc_t* pc,
- pc_t* limit) V8_WARN_UNUSED_RESULT {
- DCHECK_NOT_NULL(target);
- DCHECK_NOT_NULL(target->side_table);
- EnsureStackSpaceForCall(target);
-
- ++num_interpreted_calls_;
-
- Frame* top = &frames_.back();
-
- // Drop everything except current parameters.
- spdiff_t sp_diff = static_cast<spdiff_t>(StackHeight() - top->sp);
- size_t arity = target->function->sig->parameter_count();
-
- DoStackTransfer(sp_diff, arity);
-
- *limit = target->end - target->start;
- decoder->Reset(target->start, target->end);
-
- // Rebuild current frame to look like a call to callee.
- top->code = target;
- top->pc = 0;
- top->sp = StackHeight() - arity;
- top->pc = InitLocals(target);
-
- *pc = top->pc;
-
- TRACE(" => ReturnCall #%zu (#%u @%zu)\n", frames_.size() - 1,
- target->function->func_index, top->pc);
-
- return true;
- }
-
- // Copies {arity} values on the top of the stack down the stack while also
- // dropping {sp_diff} many stack values in total from the stack.
- void DoStackTransfer(spdiff_t sp_diff, size_t arity) {
- // before: |---------------| pop_count | arity |
- // ^ 0 ^ dest ^ src ^ StackHeight()
- // ^----< sp_diff >----^
- //
- // after: |---------------| arity |
- // ^ 0 ^ StackHeight()
- sp_t stack_height = StackHeight();
- sp_t dest = stack_height - sp_diff;
- sp_t src = stack_height - arity;
- DCHECK_LE(dest, stack_height);
- DCHECK_LE(dest, src);
- if (arity && (dest != src)) {
- StackValue* stack = stack_.get();
- memmove(stack + dest, stack + src, arity * sizeof(StackValue));
- // Also move elements on the reference stack accordingly.
- reference_stack().MoveElements(
- isolate_, static_cast<int>(dest), static_cast<int>(src),
- static_cast<int>(arity), UPDATE_WRITE_BARRIER);
- }
- ResetStack(dest + arity);
- }
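
For orientation, the same transfer can be written against a plain std::vector. This is a hypothetical helper, not part of V8, and it ignores the parallel reference-stack move done in the real code: the top arity values are kept, and the sp_diff - arity values underneath them are dropped, mirroring the before/after picture in the comment above.

#include <cstddef>
#include <vector>

// Keep the top `arity` values and drop the `sp_diff - arity` values below
// them by copying the kept values down and shrinking the stack.
template <typename T>
void StackTransfer(std::vector<T>* stack, size_t sp_diff, size_t arity) {
  size_t height = stack->size();
  size_t dest = height - sp_diff;  // where the kept values end up
  size_t src = height - arity;     // where the kept values currently are
  for (size_t i = 0; i < arity; ++i) (*stack)[dest + i] = (*stack)[src + i];
  stack->resize(dest + arity);
}
// Example: height 10, sp_diff 5, arity 2 copies indices 8..9 down to 5..6,
// leaving a new height of 7.
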
-
- inline Address EffectiveAddress(uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return reinterpret_cast<Address>(instance_object_->memory_start()) +
- (index & instance_object_->memory_mask());
- }
-
- template <typename mtype>
- inline Address BoundsCheckMem(uint32_t offset, uint32_t index) {
- uint32_t effective_index = offset + index;
- if (effective_index < index) {
- return kNullAddress; // wraparound => oob
- }
- if (!base::IsInBounds(effective_index, sizeof(mtype),
- instance_object_->memory_size())) {
- return kNullAddress; // oob
- }
- return EffectiveAddress(effective_index);
- }
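
The wraparound check above relies on unsigned 32-bit overflow: if offset + index wraps, the sum is smaller than either operand. A standalone sketch of the same idea, with hypothetical names and simplified types:

#include <cstdint>

// Returns true when the `access_size`-byte access at `offset + index` lies
// fully inside a memory of `memory_size` bytes.
bool InBounds32(uint32_t offset, uint32_t index, uint32_t access_size,
                uint64_t memory_size) {
  uint32_t effective_index = offset + index;   // may wrap around
  if (effective_index < index) return false;   // wraparound => out of bounds
  return uint64_t{effective_index} + access_size <= memory_size;
}
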
-
- inline bool BoundsCheckMemRange(uint32_t index, uint32_t* size,
- Address* out_address) {
- bool ok = base::ClampToBounds(
- index, size, static_cast<uint32_t>(instance_object_->memory_size()));
- *out_address = EffectiveAddress(index);
- return ok;
- }
-
- template <typename ctype, typename mtype>
- bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep,
- int prefix_len = 0) {
-    // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
-    // the memarg is 1 byte from pc. We don't increment pc at the caller,
-    // because we want pc to point at the start of the operation; this keeps
-    // trap reporting and tracing accurate, as they would otherwise point into
-    // the middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc + prefix_len), sizeof(ctype));
- uint32_t index = Pop().to<uint32_t>();
- Address addr = BoundsCheckMem<mtype>(imm.offset, index);
- if (!addr) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- WasmValue result(
- converter<ctype, mtype>{}(ReadLittleEndianValue<mtype>(addr)));
-
- Push(result);
- *len += imm.length;
-
- if (FLAG_trace_wasm_memory) {
- MemoryTracingInfo info(imm.offset + index, false, rep);
- TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
- code->function->func_index, static_cast<int>(pc),
- instance_object_->memory_start());
- }
-
- return true;
- }
-
- template <typename ctype, typename mtype>
- bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep,
- int prefix_len = 0) {
-    // Some opcodes have a prefix byte, and MemoryAccessImmediate assumes that
-    // the memarg is 1 byte from pc. We don't increment pc at the caller,
-    // because we want pc to point at the start of the operation; this keeps
-    // trap reporting and tracing accurate, as they would otherwise point into
-    // the middle of an opcode.
- MemoryAccessImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc + prefix_len), sizeof(ctype));
- ctype val = Pop().to<ctype>();
-
- uint32_t index = Pop().to<uint32_t>();
- Address addr = BoundsCheckMem<mtype>(imm.offset, index);
- if (!addr) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- WriteLittleEndianValue<mtype>(addr, converter<mtype, ctype>{}(val));
- *len += imm.length;
-
- if (FLAG_trace_wasm_memory) {
- MemoryTracingInfo info(imm.offset + index, true, rep);
- TraceMemoryOperation(ExecutionTier::kInterpreter, &info,
- code->function->func_index, static_cast<int>(pc),
- instance_object_->memory_start());
- }
-
- return true;
- }
-
- template <typename type, typename op_type>
- bool ExtractAtomicOpParams(Decoder* decoder, InterpreterCode* code,
- Address* address, pc_t pc, int* const len,
- type* val = nullptr, type* val2 = nullptr) {
- MemoryAccessImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc + 1),
- sizeof(type));
- if (val2) *val2 = static_cast<type>(Pop().to<op_type>());
- if (val) *val = static_cast<type>(Pop().to<op_type>());
- uint32_t index = Pop().to<uint32_t>();
- *address = BoundsCheckMem<type>(imm.offset, index);
- if (!*address) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- if (!IsAligned(*address, sizeof(type))) {
- DoTrap(kTrapUnalignedAccess, pc);
- return false;
- }
- *len += imm.length;
- return true;
- }
-
- template <typename type>
- bool ExtractAtomicWaitNotifyParams(Decoder* decoder, InterpreterCode* code,
- pc_t pc, int* const len,
- uint32_t* buffer_offset, type* val,
- int64_t* timeout = nullptr) {
- MemoryAccessImmediate<Decoder::kValidate> imm(decoder, code->at(pc + 1),
- sizeof(type));
- if (timeout) {
- *timeout = Pop().to<int64_t>();
- }
- *val = Pop().to<type>();
- auto index = Pop().to<uint32_t>();
- // Check bounds.
- Address address = BoundsCheckMem<uint32_t>(imm.offset, index);
- *buffer_offset = index + imm.offset;
- if (!address) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- // Check alignment.
- const uint32_t align_mask = sizeof(type) - 1;
- if ((*buffer_offset & align_mask) != 0) {
- DoTrap(kTrapUnalignedAccess, pc);
- return false;
- }
- *len += imm.length;
- return true;
- }
-
- bool ExecuteNumericOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int* const len) {
- switch (opcode) {
- case kExprI32SConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<float>())));
- return true;
- case kExprI32UConvertSatF32:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<float>())));
- return true;
- case kExprI32SConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<int32_t>(Pop().to<double>())));
- return true;
- case kExprI32UConvertSatF64:
- Push(WasmValue(ExecuteConvertSaturate<uint32_t>(Pop().to<double>())));
- return true;
- case kExprI64SConvertSatF32:
- Push(WasmValue(ExecuteI64SConvertSatF32(Pop().to<float>())));
- return true;
- case kExprI64UConvertSatF32:
- Push(WasmValue(ExecuteI64UConvertSatF32(Pop().to<float>())));
- return true;
- case kExprI64SConvertSatF64:
- Push(WasmValue(ExecuteI64SConvertSatF64(Pop().to<double>())));
- return true;
- case kExprI64UConvertSatF64:
- Push(WasmValue(ExecuteI64UConvertSatF64(Pop().to<double>())));
- return true;
- case kExprMemoryInit: {
- MemoryInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
-        // Validation guarantees that the data segment index is in bounds.
- DCHECK_LT(imm.data_segment_index, module()->num_declared_data_segments);
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- auto src_max =
- instance_object_->data_segment_sizes()[imm.data_segment_index];
- if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
- !base::IsInBounds(src, size, src_max)) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- Address src_addr =
- instance_object_->data_segment_starts()[imm.data_segment_index] +
- src;
- std::memmove(reinterpret_cast<void*>(dst_addr),
- reinterpret_cast<void*>(src_addr), size);
- return true;
- }
- case kExprDataDrop: {
- DataDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
-        // Validation guarantees that the data segment index is in bounds.
- DCHECK_LT(imm.index, module()->num_declared_data_segments);
- *len += imm.length;
- instance_object_->data_segment_sizes()[imm.index] = 0;
- return true;
- }
- case kExprMemoryCopy: {
- MemoryCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- Address src_addr;
- if (!BoundsCheckMemRange(dst, &size, &dst_addr) ||
- !BoundsCheckMemRange(src, &size, &src_addr)) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
-
- std::memmove(reinterpret_cast<void*>(dst_addr),
- reinterpret_cast<void*>(src_addr), size);
- return true;
- }
- case kExprMemoryFill: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto value = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- Address dst_addr;
- bool ok = BoundsCheckMemRange(dst, &size, &dst_addr);
- if (!ok) {
- DoTrap(kTrapMemOutOfBounds, pc);
- return false;
- }
- std::memset(reinterpret_cast<void*>(dst_addr), value, size);
- return true;
- }
- case kExprTableInit: {
- TableInitImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- HandleScope scope(isolate_); // Avoid leaking handles.
- bool ok = WasmInstanceObject::InitTableEntries(
- instance_object_->GetIsolate(), instance_object_, imm.table.index,
- imm.elem_segment_index, dst, src, size);
- if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- return ok;
- }
- case kExprElemDrop: {
- ElemDropImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- *len += imm.length;
- instance_object_->dropped_elem_segments()[imm.index] = 1;
- return true;
- }
- case kExprTableCopy: {
- TableCopyImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc));
- auto size = Pop().to<uint32_t>();
- auto src = Pop().to<uint32_t>();
- auto dst = Pop().to<uint32_t>();
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- bool ok = WasmInstanceObject::CopyTableEntries(
- isolate_, instance_object_, imm.table_dst.index,
- imm.table_src.index, dst, src, size);
- if (!ok) DoTrap(kTrapTableOutOfBounds, pc);
- *len += imm.length;
- return ok;
- }
- case kExprTableGrow: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- auto delta = Pop().to<uint32_t>();
- auto value = Pop().to_anyref();
- int32_t result = WasmTableObject::Grow(isolate_, table, delta, value);
- Push(WasmValue(result));
- *len += imm.length;
- return true;
- }
- case kExprTableSize: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- Push(WasmValue(table_size));
- *len += imm.length;
- return true;
- }
- case kExprTableFill: {
- TableIndexImmediate<Decoder::kNoValidate> imm(decoder,
- code->at(pc + 1));
- HandleScope handle_scope(isolate_);
- auto count = Pop().to<uint32_t>();
- auto value = Pop().to_anyref();
- auto start = Pop().to<uint32_t>();
-
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- if (start > table_size) {
- DoTrap(kTrapTableOutOfBounds, pc);
- return false;
- }
-
-        // If the fill range goes out-of-bounds, we trap without writing any
-        // entries to the table.
- uint32_t fill_count = std::min(count, table_size - start);
- if (fill_count < count) {
- DoTrap(kTrapTableOutOfBounds, pc);
- return false;
- }
- WasmTableObject::Fill(isolate_, table, start, value, fill_count);
-
- *len += imm.length;
- return true;
- }
- default:
- FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
- OpcodeName(code->start[pc]));
- UNREACHABLE();
- }
- return false;
- }
-
- template <typename type, typename op_type, typename func>
- op_type ExecuteAtomicBinopBE(type val, Address addr, func op) {
- type old_val;
- type new_val;
- old_val = ReadUnalignedValue<type>(addr);
- do {
- new_val =
- ByteReverse(static_cast<type>(op(ByteReverse<type>(old_val), val)));
- } while (!(std::atomic_compare_exchange_strong(
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val)));
- return static_cast<op_type>(ByteReverse<type>(old_val));
- }
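
Wasm memory is little-endian, so on a big-endian host the stored image is byte-swapped relative to native order and read-modify-write atomics fall back to the compare-exchange retry loop above. A self-contained sketch of the same pattern for a 32-bit fetch-add, using hypothetical helpers rather than V8 API:

#include <atomic>
#include <cstdint>

uint32_t ByteSwap32(uint32_t v) {
  return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
}

// Atomically adds `val` (native order) to a byte-swapped cell and returns the
// previous value in native order; retries until no other thread intervened.
uint32_t FetchAddSwapped(std::atomic<uint32_t>* cell, uint32_t val) {
  uint32_t old_raw = cell->load();
  uint32_t new_raw;
  do {
    new_raw = ByteSwap32(ByteSwap32(old_raw) + val);  // operate in native order
  } while (!cell->compare_exchange_strong(old_raw, new_raw));
  return ByteSwap32(old_raw);
}
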
-
- template <typename type>
- type AdjustByteOrder(type param) {
-#if V8_TARGET_BIG_ENDIAN
- return ByteReverse(param);
-#else
- return param;
-#endif
- }
-
- bool ExecuteAtomicOp(WasmOpcode opcode, Decoder* decoder,
- InterpreterCode* code, pc_t pc, int* const len) {
-#if V8_TARGET_BIG_ENDIAN
- constexpr bool kBigEndian = true;
-#else
- constexpr bool kBigEndian = false;
-#endif
- WasmValue result;
- switch (opcode) {
-#define ATOMIC_BINOP_CASE(name, type, op_type, operation, op) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- op_type result; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- if (kBigEndian) { \
- auto oplambda = [](type a, type b) { return a op b; }; \
- result = ExecuteAtomicBinopBE<type, op_type>(val, addr, oplambda); \
- } else { \
- result = static_cast<op_type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), val)); \
- } \
- Push(WasmValue(result)); \
- break; \
- }
- ATOMIC_BINOP_CASE(I32AtomicAdd, uint32_t, uint32_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I32AtomicAdd8U, uint8_t, uint32_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I32AtomicAdd16U, uint16_t, uint32_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I32AtomicSub, uint32_t, uint32_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I32AtomicSub8U, uint8_t, uint32_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I32AtomicSub16U, uint16_t, uint32_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I32AtomicAnd, uint32_t, uint32_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicAnd8U, uint8_t, uint32_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicAnd16U, uint16_t, uint32_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I32AtomicOr, uint32_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicOr8U, uint8_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicOr16U, uint16_t, uint32_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I32AtomicXor, uint32_t, uint32_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I32AtomicXor8U, uint8_t, uint32_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I32AtomicXor16U, uint16_t, uint32_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I32AtomicExchange, uint32_t, uint32_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I32AtomicExchange8U, uint8_t, uint32_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I32AtomicExchange16U, uint16_t, uint32_t,
- atomic_exchange, =);
- ATOMIC_BINOP_CASE(I64AtomicAdd, uint64_t, uint64_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I64AtomicAdd8U, uint8_t, uint64_t, atomic_fetch_add, +);
- ATOMIC_BINOP_CASE(I64AtomicAdd16U, uint16_t, uint64_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I64AtomicAdd32U, uint32_t, uint64_t, atomic_fetch_add,
- +);
- ATOMIC_BINOP_CASE(I64AtomicSub, uint64_t, uint64_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I64AtomicSub8U, uint8_t, uint64_t, atomic_fetch_sub, -);
- ATOMIC_BINOP_CASE(I64AtomicSub16U, uint16_t, uint64_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I64AtomicSub32U, uint32_t, uint64_t, atomic_fetch_sub,
- -);
- ATOMIC_BINOP_CASE(I64AtomicAnd, uint64_t, uint64_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd8U, uint8_t, uint64_t, atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd16U, uint16_t, uint64_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicAnd32U, uint32_t, uint64_t,
- atomic_fetch_and, &);
- ATOMIC_BINOP_CASE(I64AtomicOr, uint64_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr8U, uint8_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr16U, uint16_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicOr32U, uint32_t, uint64_t, atomic_fetch_or, |);
- ATOMIC_BINOP_CASE(I64AtomicXor, uint64_t, uint64_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I64AtomicXor8U, uint8_t, uint64_t, atomic_fetch_xor, ^);
- ATOMIC_BINOP_CASE(I64AtomicXor16U, uint16_t, uint64_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I64AtomicXor32U, uint32_t, uint64_t, atomic_fetch_xor,
- ^);
- ATOMIC_BINOP_CASE(I64AtomicExchange, uint64_t, uint64_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I64AtomicExchange8U, uint8_t, uint64_t, atomic_exchange,
- =);
- ATOMIC_BINOP_CASE(I64AtomicExchange16U, uint16_t, uint64_t,
- atomic_exchange, =);
- ATOMIC_BINOP_CASE(I64AtomicExchange32U, uint32_t, uint64_t,
- atomic_exchange, =);
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_COMPARE_EXCHANGE_CASE(name, type, op_type) \
- case kExpr##name: { \
- type old_val; \
- type new_val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &old_val, &new_val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- old_val = AdjustByteOrder<type>(old_val); \
- new_val = AdjustByteOrder<type>(new_val); \
- std::atomic_compare_exchange_strong( \
- reinterpret_cast<std::atomic<type>*>(addr), &old_val, new_val); \
- Push(WasmValue(static_cast<op_type>(AdjustByteOrder<type>(old_val)))); \
- break; \
- }
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange, uint32_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange8U, uint8_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I32AtomicCompareExchange16U, uint16_t,
- uint32_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange, uint64_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange8U, uint8_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange16U, uint16_t,
- uint64_t);
- ATOMIC_COMPARE_EXCHANGE_CASE(I64AtomicCompareExchange32U, uint32_t,
- uint64_t);
-#undef ATOMIC_COMPARE_EXCHANGE_CASE
-#define ATOMIC_LOAD_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, \
- len)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- result = WasmValue(static_cast<op_type>(AdjustByteOrder<type>( \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr))))); \
- Push(result); \
- break; \
- }
- ATOMIC_LOAD_CASE(I32AtomicLoad, uint32_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad8U, uint8_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I32AtomicLoad16U, uint16_t, uint32_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad, uint64_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad8U, uint8_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad16U, uint16_t, uint64_t, atomic_load);
- ATOMIC_LOAD_CASE(I64AtomicLoad32U, uint32_t, uint64_t, atomic_load);
-#undef ATOMIC_LOAD_CASE
-#define ATOMIC_STORE_CASE(name, type, op_type, operation) \
- case kExpr##name: { \
- type val; \
- Address addr; \
- if (!ExtractAtomicOpParams<type, op_type>(decoder, code, &addr, pc, len, \
- &val)) { \
- return false; \
- } \
- static_assert(sizeof(std::atomic<type>) == sizeof(type), \
- "Size mismatch for types std::atomic<" #type \
- ">, and " #type); \
- std::operation(reinterpret_cast<std::atomic<type>*>(addr), \
- AdjustByteOrder<type>(val)); \
- break; \
- }
- ATOMIC_STORE_CASE(I32AtomicStore, uint32_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore8U, uint8_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I32AtomicStore16U, uint16_t, uint32_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore, uint64_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore8U, uint8_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore16U, uint16_t, uint64_t, atomic_store);
- ATOMIC_STORE_CASE(I64AtomicStore32U, uint32_t, uint64_t, atomic_store);
-#undef ATOMIC_STORE_CASE
- case kExprAtomicFence:
- std::atomic_thread_fence(std::memory_order_seq_cst);
- *len += 1;
- break;
- case kExprI32AtomicWait: {
- int32_t val;
- int64_t timeout;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int32_t>(
- decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::WaitWasm32(isolate_, array_buffer,
- buffer_offset, val, timeout);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- case kExprI64AtomicWait: {
- int64_t val;
- int64_t timeout;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int64_t>(
- decoder, code, pc, len, &buffer_offset, &val, &timeout)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::WaitWasm64(isolate_, array_buffer,
- buffer_offset, val, timeout);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- case kExprAtomicNotify: {
- int32_t val;
- uint32_t buffer_offset;
- if (!ExtractAtomicWaitNotifyParams<int32_t>(decoder, code, pc, len,
- &buffer_offset, &val)) {
- return false;
- }
- HandleScope handle_scope(isolate_);
- Handle<JSArrayBuffer> array_buffer(
- instance_object_->memory_object().array_buffer(), isolate_);
- auto result = FutexEmulation::Wake(array_buffer, buffer_offset, val);
- Push(WasmValue(result.ToSmi().value()));
- break;
- }
- default:
- UNREACHABLE();
- return false;
- }
- return true;
- }
-
- bool ExecuteSimdOp(WasmOpcode opcode, Decoder* decoder, InterpreterCode* code,
- pc_t pc, int* const len, uint32_t opcode_length) {
- switch (opcode) {
-#define SPLAT_CASE(format, sType, valType, num) \
- case kExpr##format##Splat: { \
- WasmValue val = Pop(); \
- valType v = val.to<valType>(); \
- sType s; \
- for (int i = 0; i < num; i++) s.val[i] = v; \
- Push(WasmValue(Simd128(s))); \
- return true; \
- }
- SPLAT_CASE(F64x2, float2, double, 2)
- SPLAT_CASE(F32x4, float4, float, 4)
- SPLAT_CASE(I64x2, int2, int64_t, 2)
- SPLAT_CASE(I32x4, int4, int32_t, 4)
- SPLAT_CASE(I16x8, int8, int32_t, 8)
- SPLAT_CASE(I8x16, int16, int32_t, 16)
-#undef SPLAT_CASE
-#define EXTRACT_LANE_CASE(format, name) \
- case kExpr##format##ExtractLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(ss.val[LANE(imm.lane, ss)])); \
- return true; \
- }
- EXTRACT_LANE_CASE(F64x2, f64x2)
- EXTRACT_LANE_CASE(F32x4, f32x4)
- EXTRACT_LANE_CASE(I64x2, i64x2)
- EXTRACT_LANE_CASE(I32x4, i32x4)
-#undef EXTRACT_LANE_CASE
-#define EXTRACT_LANE_EXTEND_CASE(format, name, sign, type) \
- case kExpr##format##ExtractLane##sign: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue val = Pop(); \
- Simd128 s = val.to_s128(); \
- auto ss = s.to_##name(); \
- Push(WasmValue(static_cast<type>(ss.val[LANE(imm.lane, ss)]))); \
- return true; \
- }
- EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, S, int32_t)
- EXTRACT_LANE_EXTEND_CASE(I16x8, i16x8, U, uint32_t)
- EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, S, int32_t)
- EXTRACT_LANE_EXTEND_CASE(I8x16, i8x16, U, uint32_t)
-#undef EXTRACT_LANE_EXTEND_CASE
-#define BINOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[LANE(i, s1)]; \
- auto b = s2.val[LANE(i, s1)]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[LANE(i, s1)] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- BINOP_CASE(F64x2Add, f64x2, float2, 2, a + b)
- BINOP_CASE(F64x2Sub, f64x2, float2, 2, a - b)
- BINOP_CASE(F64x2Mul, f64x2, float2, 2, a * b)
- BINOP_CASE(F64x2Div, f64x2, float2, 2, base::Divide(a, b))
- BINOP_CASE(F64x2Min, f64x2, float2, 2, JSMin(a, b))
- BINOP_CASE(F64x2Max, f64x2, float2, 2, JSMax(a, b))
- BINOP_CASE(F64x2Pmin, f64x2, float2, 2, std::min(a, b))
- BINOP_CASE(F64x2Pmax, f64x2, float2, 2, std::max(a, b))
- BINOP_CASE(F32x4Add, f32x4, float4, 4, a + b)
- BINOP_CASE(F32x4Sub, f32x4, float4, 4, a - b)
- BINOP_CASE(F32x4Mul, f32x4, float4, 4, a * b)
- BINOP_CASE(F32x4Div, f32x4, float4, 4, a / b)
- BINOP_CASE(F32x4Min, f32x4, float4, 4, JSMin(a, b))
- BINOP_CASE(F32x4Max, f32x4, float4, 4, JSMax(a, b))
- BINOP_CASE(F32x4Pmin, f32x4, float4, 4, std::min(a, b))
- BINOP_CASE(F32x4Pmax, f32x4, float4, 4, std::max(a, b))
- BINOP_CASE(I64x2Add, i64x2, int2, 2, base::AddWithWraparound(a, b))
- BINOP_CASE(I64x2Sub, i64x2, int2, 2, base::SubWithWraparound(a, b))
- BINOP_CASE(I64x2Mul, i64x2, int2, 2, base::MulWithWraparound(a, b))
- BINOP_CASE(I64x2MinS, i64x2, int2, 2, a < b ? a : b)
- BINOP_CASE(I64x2MinU, i64x2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b) ? a : b)
- BINOP_CASE(I64x2MaxS, i64x2, int2, 2, a > b ? a : b)
- BINOP_CASE(I64x2MaxU, i64x2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b) ? a : b)
- BINOP_CASE(I32x4Add, i32x4, int4, 4, base::AddWithWraparound(a, b))
- BINOP_CASE(I32x4Sub, i32x4, int4, 4, base::SubWithWraparound(a, b))
- BINOP_CASE(I32x4Mul, i32x4, int4, 4, base::MulWithWraparound(a, b))
- BINOP_CASE(I32x4MinS, i32x4, int4, 4, a < b ? a : b)
- BINOP_CASE(I32x4MinU, i32x4, int4, 4,
- static_cast<uint32_t>(a) < static_cast<uint32_t>(b) ? a : b)
- BINOP_CASE(I32x4MaxS, i32x4, int4, 4, a > b ? a : b)
- BINOP_CASE(I32x4MaxU, i32x4, int4, 4,
- static_cast<uint32_t>(a) > static_cast<uint32_t>(b) ? a : b)
- BINOP_CASE(S128And, i32x4, int4, 4, a & b)
- BINOP_CASE(S128Or, i32x4, int4, 4, a | b)
- BINOP_CASE(S128Xor, i32x4, int4, 4, a ^ b)
- BINOP_CASE(S128AndNot, i32x4, int4, 4, a & ~b)
- BINOP_CASE(I16x8Add, i16x8, int8, 8, base::AddWithWraparound(a, b))
- BINOP_CASE(I16x8Sub, i16x8, int8, 8, base::SubWithWraparound(a, b))
- BINOP_CASE(I16x8Mul, i16x8, int8, 8, base::MulWithWraparound(a, b))
- BINOP_CASE(I16x8MinS, i16x8, int8, 8, a < b ? a : b)
- BINOP_CASE(I16x8MinU, i16x8, int8, 8,
- static_cast<uint16_t>(a) < static_cast<uint16_t>(b) ? a : b)
- BINOP_CASE(I16x8MaxS, i16x8, int8, 8, a > b ? a : b)
- BINOP_CASE(I16x8MaxU, i16x8, int8, 8,
- static_cast<uint16_t>(a) > static_cast<uint16_t>(b) ? a : b)
- BINOP_CASE(I16x8AddSaturateS, i16x8, int8, 8, SaturateAdd<int16_t>(a, b))
- BINOP_CASE(I16x8AddSaturateU, i16x8, int8, 8, SaturateAdd<uint16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateS, i16x8, int8, 8, SaturateSub<int16_t>(a, b))
- BINOP_CASE(I16x8SubSaturateU, i16x8, int8, 8, SaturateSub<uint16_t>(a, b))
- BINOP_CASE(I16x8RoundingAverageU, i16x8, int8, 8,
- base::RoundingAverageUnsigned<uint16_t>(a, b))
- BINOP_CASE(I8x16Add, i8x16, int16, 16, base::AddWithWraparound(a, b))
- BINOP_CASE(I8x16Sub, i8x16, int16, 16, base::SubWithWraparound(a, b))
- BINOP_CASE(I8x16Mul, i8x16, int16, 16, base::MulWithWraparound(a, b))
- BINOP_CASE(I8x16MinS, i8x16, int16, 16, a < b ? a : b)
- BINOP_CASE(I8x16MinU, i8x16, int16, 16,
- static_cast<uint8_t>(a) < static_cast<uint8_t>(b) ? a : b)
- BINOP_CASE(I8x16MaxS, i8x16, int16, 16, a > b ? a : b)
- BINOP_CASE(I8x16MaxU, i8x16, int16, 16,
- static_cast<uint8_t>(a) > static_cast<uint8_t>(b) ? a : b)
- BINOP_CASE(I8x16AddSaturateS, i8x16, int16, 16, SaturateAdd<int8_t>(a, b))
- BINOP_CASE(I8x16AddSaturateU, i8x16, int16, 16,
- SaturateAdd<uint8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateS, i8x16, int16, 16, SaturateSub<int8_t>(a, b))
- BINOP_CASE(I8x16SubSaturateU, i8x16, int16, 16,
- SaturateSub<uint8_t>(a, b))
- BINOP_CASE(I8x16RoundingAverageU, i8x16, int16, 16,
- base::RoundingAverageUnsigned<uint8_t>(a, b))
-#undef BINOP_CASE
-#define UNOP_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- UNOP_CASE(F64x2Abs, f64x2, float2, 2, std::abs(a))
- UNOP_CASE(F64x2Neg, f64x2, float2, 2, -a)
- UNOP_CASE(F64x2Sqrt, f64x2, float2, 2, std::sqrt(a))
- UNOP_CASE(F32x4Abs, f32x4, float4, 4, std::abs(a))
- UNOP_CASE(F32x4Neg, f32x4, float4, 4, -a)
- UNOP_CASE(F32x4Sqrt, f32x4, float4, 4, std::sqrt(a))
- UNOP_CASE(F32x4RecipApprox, f32x4, float4, 4, base::Recip(a))
- UNOP_CASE(F32x4RecipSqrtApprox, f32x4, float4, 4, base::RecipSqrt(a))
- UNOP_CASE(I64x2Neg, i64x2, int2, 2, base::NegateWithWraparound(a))
- UNOP_CASE(I32x4Neg, i32x4, int4, 4, base::NegateWithWraparound(a))
- UNOP_CASE(I32x4Abs, i32x4, int4, 4, std::abs(a))
- UNOP_CASE(S128Not, i32x4, int4, 4, ~a)
- UNOP_CASE(I16x8Neg, i16x8, int8, 8, base::NegateWithWraparound(a))
- UNOP_CASE(I16x8Abs, i16x8, int8, 8, std::abs(a))
- UNOP_CASE(I8x16Neg, i8x16, int16, 16, base::NegateWithWraparound(a))
- UNOP_CASE(I8x16Abs, i8x16, int16, 16, std::abs(a))
-#undef UNOP_CASE
-
-// Cast to double in call to signbit is due to an MSVC issue, see
-// https://github.com/microsoft/STL/issues/519.
-#define BITMASK_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- int32_t res = 0; \
- for (size_t i = 0; i < count; ++i) { \
- bool sign = std::signbit(static_cast<double>(s.val[i])); \
- res |= (sign << i); \
- } \
- Push(WasmValue(res)); \
- return true; \
- }
- BITMASK_CASE(I8x16BitMask, i8x16, int16, 16)
- BITMASK_CASE(I16x8BitMask, i16x8, int8, 8)
- BITMASK_CASE(I32x4BitMask, i32x4, int4, 4)
-#undef BITMASK_CASE
-
-#define CMPOP_CASE(op, name, stype, out_stype, count, expr) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- out_stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s1.val[i]; \
- auto b = s2.val[i]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[i] = result ? -1 : 0; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- CMPOP_CASE(F64x2Eq, f64x2, float2, int2, 2, a == b)
- CMPOP_CASE(F64x2Ne, f64x2, float2, int2, 2, a != b)
- CMPOP_CASE(F64x2Gt, f64x2, float2, int2, 2, a > b)
- CMPOP_CASE(F64x2Ge, f64x2, float2, int2, 2, a >= b)
- CMPOP_CASE(F64x2Lt, f64x2, float2, int2, 2, a < b)
- CMPOP_CASE(F64x2Le, f64x2, float2, int2, 2, a <= b)
- CMPOP_CASE(F32x4Eq, f32x4, float4, int4, 4, a == b)
- CMPOP_CASE(F32x4Ne, f32x4, float4, int4, 4, a != b)
- CMPOP_CASE(F32x4Gt, f32x4, float4, int4, 4, a > b)
- CMPOP_CASE(F32x4Ge, f32x4, float4, int4, 4, a >= b)
- CMPOP_CASE(F32x4Lt, f32x4, float4, int4, 4, a < b)
- CMPOP_CASE(F32x4Le, f32x4, float4, int4, 4, a <= b)
- CMPOP_CASE(I64x2Eq, i64x2, int2, int2, 2, a == b)
- CMPOP_CASE(I64x2Ne, i64x2, int2, int2, 2, a != b)
- CMPOP_CASE(I64x2GtS, i64x2, int2, int2, 2, a > b)
- CMPOP_CASE(I64x2GeS, i64x2, int2, int2, 2, a >= b)
- CMPOP_CASE(I64x2LtS, i64x2, int2, int2, 2, a < b)
- CMPOP_CASE(I64x2LeS, i64x2, int2, int2, 2, a <= b)
- CMPOP_CASE(I64x2GtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) > static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2GeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) >= static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LtU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) < static_cast<uint64_t>(b))
- CMPOP_CASE(I64x2LeU, i64x2, int2, int2, 2,
- static_cast<uint64_t>(a) <= static_cast<uint64_t>(b))
- CMPOP_CASE(I32x4Eq, i32x4, int4, int4, 4, a == b)
- CMPOP_CASE(I32x4Ne, i32x4, int4, int4, 4, a != b)
- CMPOP_CASE(I32x4GtS, i32x4, int4, int4, 4, a > b)
- CMPOP_CASE(I32x4GeS, i32x4, int4, int4, 4, a >= b)
- CMPOP_CASE(I32x4LtS, i32x4, int4, int4, 4, a < b)
- CMPOP_CASE(I32x4LeS, i32x4, int4, int4, 4, a <= b)
- CMPOP_CASE(I32x4GtU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) > static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4GeU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) >= static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4LtU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) < static_cast<uint32_t>(b))
- CMPOP_CASE(I32x4LeU, i32x4, int4, int4, 4,
- static_cast<uint32_t>(a) <= static_cast<uint32_t>(b))
- CMPOP_CASE(I16x8Eq, i16x8, int8, int8, 8, a == b)
- CMPOP_CASE(I16x8Ne, i16x8, int8, int8, 8, a != b)
- CMPOP_CASE(I16x8GtS, i16x8, int8, int8, 8, a > b)
- CMPOP_CASE(I16x8GeS, i16x8, int8, int8, 8, a >= b)
- CMPOP_CASE(I16x8LtS, i16x8, int8, int8, 8, a < b)
- CMPOP_CASE(I16x8LeS, i16x8, int8, int8, 8, a <= b)
- CMPOP_CASE(I16x8GtU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) > static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8GeU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) >= static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8LtU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) < static_cast<uint16_t>(b))
- CMPOP_CASE(I16x8LeU, i16x8, int8, int8, 8,
- static_cast<uint16_t>(a) <= static_cast<uint16_t>(b))
- CMPOP_CASE(I8x16Eq, i8x16, int16, int16, 16, a == b)
- CMPOP_CASE(I8x16Ne, i8x16, int16, int16, 16, a != b)
- CMPOP_CASE(I8x16GtS, i8x16, int16, int16, 16, a > b)
- CMPOP_CASE(I8x16GeS, i8x16, int16, int16, 16, a >= b)
- CMPOP_CASE(I8x16LtS, i8x16, int16, int16, 16, a < b)
- CMPOP_CASE(I8x16LeS, i8x16, int16, int16, 16, a <= b)
- CMPOP_CASE(I8x16GtU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) > static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16GeU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) >= static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16LtU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) < static_cast<uint8_t>(b))
- CMPOP_CASE(I8x16LeU, i8x16, int16, int16, 16,
- static_cast<uint8_t>(a) <= static_cast<uint8_t>(b))
-#undef CMPOP_CASE
-#define REPLACE_LANE_CASE(format, name, stype, ctype) \
- case kExpr##format##ReplaceLane: { \
- SimdLaneImmediate<Decoder::kNoValidate> imm(decoder, code->at(pc), \
- opcode_length); \
- *len += 1; \
- WasmValue new_val = Pop(); \
- WasmValue simd_val = Pop(); \
- stype s = simd_val.to_s128().to_##name(); \
- s.val[LANE(imm.lane, s)] = new_val.to<ctype>(); \
- Push(WasmValue(Simd128(s))); \
- return true; \
- }
- REPLACE_LANE_CASE(F64x2, f64x2, float2, double)
- REPLACE_LANE_CASE(F32x4, f32x4, float4, float)
- REPLACE_LANE_CASE(I64x2, i64x2, int2, int64_t)
- REPLACE_LANE_CASE(I32x4, i32x4, int4, int32_t)
- REPLACE_LANE_CASE(I16x8, i16x8, int8, int32_t)
- REPLACE_LANE_CASE(I8x16, i8x16, int16, int32_t)
-#undef REPLACE_LANE_CASE
- case kExprS128LoadMem:
- return ExecuteLoad<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128,
- /*prefix_len=*/opcode_length);
- case kExprS128StoreMem:
- return ExecuteStore<Simd128, Simd128>(decoder, code, pc, len,
- MachineRepresentation::kSimd128,
- /*prefix_len=*/opcode_length);
-#define SHIFT_CASE(op, name, stype, count, expr) \
- case kExpr##op: { \
- uint32_t shift = Pop().to<uint32_t>(); \
- WasmValue v = Pop(); \
- stype s = v.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; ++i) { \
- auto a = s.val[i]; \
- res.val[i] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- SHIFT_CASE(I64x2Shl, i64x2, int2, 2,
- static_cast<uint64_t>(a) << (shift % 64))
- SHIFT_CASE(I64x2ShrS, i64x2, int2, 2, a >> (shift % 64))
- SHIFT_CASE(I64x2ShrU, i64x2, int2, 2,
- static_cast<uint64_t>(a) >> (shift % 64))
- SHIFT_CASE(I32x4Shl, i32x4, int4, 4,
- static_cast<uint32_t>(a) << (shift % 32))
- SHIFT_CASE(I32x4ShrS, i32x4, int4, 4, a >> (shift % 32))
- SHIFT_CASE(I32x4ShrU, i32x4, int4, 4,
- static_cast<uint32_t>(a) >> (shift % 32))
- SHIFT_CASE(I16x8Shl, i16x8, int8, 8,
- static_cast<uint16_t>(a) << (shift % 16))
- SHIFT_CASE(I16x8ShrS, i16x8, int8, 8, a >> (shift % 16))
- SHIFT_CASE(I16x8ShrU, i16x8, int8, 8,
- static_cast<uint16_t>(a) >> (shift % 16))
- SHIFT_CASE(I8x16Shl, i8x16, int16, 16,
- static_cast<uint8_t>(a) << (shift % 8))
- SHIFT_CASE(I8x16ShrS, i8x16, int16, 16, a >> (shift % 8))
- SHIFT_CASE(I8x16ShrU, i8x16, int16, 16,
- static_cast<uint8_t>(a) >> (shift % 8))
-#undef SHIFT_CASE
-#define CONVERT_CASE(op, src_type, name, dst_type, count, start_index, ctype, \
- expr) \
- case kExpr##op: { \
- WasmValue v = Pop(); \
- src_type s = v.to_s128().to_##name(); \
- dst_type res; \
- for (size_t i = 0; i < count; ++i) { \
- ctype a = s.val[LANE(start_index + i, s)]; \
- auto result = expr; \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- res.val[LANE(i, res)] = expr; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- CONVERT_CASE(F32x4SConvertI32x4, int4, i32x4, float4, 4, 0, int32_t,
- static_cast<float>(a))
- CONVERT_CASE(F32x4UConvertI32x4, int4, i32x4, float4, 4, 0, uint32_t,
- static_cast<float>(a))
-      CONVERT_CASE(I32x4SConvertF32x4, float4, f32x4, int4, 4, 0, double,
-                   std::isnan(a)  ? 0
-                   : a < kMinInt  ? kMinInt
-                   : a > kMaxInt  ? kMaxInt
-                                  : static_cast<int32_t>(a))
-      CONVERT_CASE(I32x4UConvertF32x4, float4, f32x4, int4, 4, 0, double,
-                   std::isnan(a)     ? 0
-                   : a < 0           ? 0
-                   : a > kMaxUInt32  ? kMaxUInt32
-                                     : static_cast<uint32_t>(a))
- CONVERT_CASE(I32x4SConvertI16x8High, int8, i16x8, int4, 4, 4, int16_t,
- a)
- CONVERT_CASE(I32x4UConvertI16x8High, int8, i16x8, int4, 4, 4, uint16_t,
- a)
- CONVERT_CASE(I32x4SConvertI16x8Low, int8, i16x8, int4, 4, 0, int16_t, a)
- CONVERT_CASE(I32x4UConvertI16x8Low, int8, i16x8, int4, 4, 0, uint16_t,
- a)
- CONVERT_CASE(I16x8SConvertI8x16High, int16, i8x16, int8, 8, 8, int8_t,
- a)
- CONVERT_CASE(I16x8UConvertI8x16High, int16, i8x16, int8, 8, 8, uint8_t,
- a)
- CONVERT_CASE(I16x8SConvertI8x16Low, int16, i8x16, int8, 8, 0, int8_t, a)
- CONVERT_CASE(I16x8UConvertI8x16Low, int16, i8x16, int8, 8, 0, uint8_t,
- a)
-#undef CONVERT_CASE
-#define PACK_CASE(op, src_type, name, dst_type, count, ctype, dst_ctype) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- src_type s1 = v1.to_s128().to_##name(); \
- src_type s2 = v2.to_s128().to_##name(); \
- dst_type res; \
- int64_t min = std::numeric_limits<ctype>::min(); \
- int64_t max = std::numeric_limits<ctype>::max(); \
- for (size_t i = 0; i < count; ++i) { \
- int64_t v = i < count / 2 ? s1.val[LANE(i, s1)] \
- : s2.val[LANE(i - count / 2, s2)]; \
- res.val[LANE(i, res)] = \
- static_cast<dst_ctype>(std::max(min, std::min(max, v))); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- PACK_CASE(I16x8SConvertI32x4, int4, i32x4, int8, 8, int16_t, int16_t)
- PACK_CASE(I16x8UConvertI32x4, int4, i32x4, int8, 8, uint16_t, int16_t)
- PACK_CASE(I8x16SConvertI16x8, int8, i16x8, int16, 16, int8_t, int8_t)
- PACK_CASE(I8x16UConvertI16x8, int8, i16x8, int16, 16, uint8_t, int8_t)
-#undef PACK_CASE
- case kExprS128Select: {
- int4 bool_val = Pop().to_s128().to_i32x4();
- int4 v2 = Pop().to_s128().to_i32x4();
- int4 v1 = Pop().to_s128().to_i32x4();
- int4 res;
- for (size_t i = 0; i < 4; ++i) {
- res.val[i] = v2.val[i] ^ ((v1.val[i] ^ v2.val[i]) & bool_val.val[i]);
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
-#define ADD_HORIZ_CASE(op, name, stype, count) \
- case kExpr##op: { \
- WasmValue v2 = Pop(); \
- WasmValue v1 = Pop(); \
- stype s1 = v1.to_s128().to_##name(); \
- stype s2 = v2.to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count / 2; ++i) { \
- auto result1 = s1.val[LANE(i * 2, s1)] + s1.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result1); \
- res.val[LANE(i, s1)] = result1; \
- auto result2 = s2.val[LANE(i * 2, s1)] + s2.val[LANE(i * 2 + 1, s1)]; \
- possible_nondeterminism_ |= has_nondeterminism(result2); \
- res.val[LANE(i + count / 2, s1)] = result2; \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- ADD_HORIZ_CASE(I32x4AddHoriz, i32x4, int4, 4)
- ADD_HORIZ_CASE(F32x4AddHoriz, f32x4, float4, 4)
- ADD_HORIZ_CASE(I16x8AddHoriz, i16x8, int8, 8)
-#undef ADD_HORIZ_CASE
- case kExprS8x16Swizzle: {
- int16 v2 = Pop().to_s128().to_i8x16();
- int16 v1 = Pop().to_s128().to_i8x16();
- int16 res;
- for (size_t i = 0; i < kSimd128Size; ++i) {
- int lane = v2.val[LANE(i, v1)];
- res.val[LANE(i, v1)] =
- lane < kSimd128Size && lane >= 0 ? v1.val[LANE(lane, v1)] : 0;
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
- case kExprS8x16Shuffle: {
- Simd8x16ShuffleImmediate<Decoder::kNoValidate> imm(
- decoder, code->at(pc), opcode_length);
- *len += 16;
- int16 v2 = Pop().to_s128().to_i8x16();
- int16 v1 = Pop().to_s128().to_i8x16();
- int16 res;
- for (size_t i = 0; i < kSimd128Size; ++i) {
- int lane = imm.shuffle[i];
- res.val[LANE(i, v1)] = lane < kSimd128Size
- ? v1.val[LANE(lane, v1)]
- : v2.val[LANE(lane - kSimd128Size, v1)];
- }
- Push(WasmValue(Simd128(res)));
- return true;
- }
- case kExprS1x2AnyTrue:
- case kExprS1x4AnyTrue:
- case kExprS1x8AnyTrue:
- case kExprS1x16AnyTrue: {
- int4 s = Pop().to_s128().to_i32x4();
- bool res = s.val[0] | s.val[1] | s.val[2] | s.val[3];
- Push(WasmValue((res)));
- return true;
- }
-#define REDUCTION_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype s = Pop().to_s128().to_##name(); \
- bool res = true; \
- for (size_t i = 0; i < count; ++i) { \
- res = res & static_cast<bool>(s.val[i]); \
- } \
- Push(WasmValue(res)); \
- return true; \
- }
- REDUCTION_CASE(S1x2AllTrue, i64x2, int2, 2, &)
- REDUCTION_CASE(S1x4AllTrue, i32x4, int4, 4, &)
- REDUCTION_CASE(S1x8AllTrue, i16x8, int8, 8, &)
- REDUCTION_CASE(S1x16AllTrue, i8x16, int16, 16, &)
-#undef REDUCTION_CASE
-#define QFM_CASE(op, name, stype, count, operation) \
- case kExpr##op: { \
- stype c = Pop().to_s128().to_##name(); \
- stype b = Pop().to_s128().to_##name(); \
- stype a = Pop().to_s128().to_##name(); \
- stype res; \
- for (size_t i = 0; i < count; i++) { \
- res.val[i] = a.val[i] operation(b.val[i] * c.val[i]); \
- } \
- Push(WasmValue(Simd128(res))); \
- return true; \
- }
- QFM_CASE(F32x4Qfma, f32x4, float4, 4, +)
- QFM_CASE(F32x4Qfms, f32x4, float4, 4, -)
- QFM_CASE(F64x2Qfma, f64x2, float2, 2, +)
- QFM_CASE(F64x2Qfms, f64x2, float2, 2, -)
-#undef QFM_CASE
- case kExprS8x16LoadSplat: {
- return DoSimdLoadSplat<int16, int32_t, int8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord8);
- }
- case kExprS16x8LoadSplat: {
- return DoSimdLoadSplat<int8, int32_t, int16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord16);
- }
- case kExprS32x4LoadSplat: {
- return DoSimdLoadSplat<int4, int32_t, int32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord32);
- }
- case kExprS64x2LoadSplat: {
- return DoSimdLoadSplat<int2, int64_t, int64_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI16x8Load8x8S: {
- return DoSimdLoadExtend<int8, int16_t, int8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI16x8Load8x8U: {
- return DoSimdLoadExtend<int8, uint16_t, uint8_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI32x4Load16x4S: {
- return DoSimdLoadExtend<int4, int32_t, int16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI32x4Load16x4U: {
- return DoSimdLoadExtend<int4, uint32_t, uint16_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI64x2Load32x2S: {
- return DoSimdLoadExtend<int2, int64_t, int32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- case kExprI64x2Load32x2U: {
- return DoSimdLoadExtend<int2, uint64_t, uint32_t>(
- decoder, code, pc, len, MachineRepresentation::kWord64);
- }
- default:
- return false;
- }
- }
-
- template <typename s_type, typename result_type, typename load_type>
- bool DoSimdLoadSplat(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
-    // len is the number of bytes that make up this op, including the prefix
-    // byte, so the prefix_len for ExecuteLoad is len minus the prefix byte
-    // itself. Think of prefix_len as the number of extra bytes in this op.
- if (!ExecuteLoad<result_type, load_type>(decoder, code, pc, len, rep,
- /*prefix_len=*/*len - 1)) {
- return false;
- }
- result_type v = Pop().to<result_type>();
- s_type s;
- for (size_t i = 0; i < arraysize(s.val); i++) s.val[i] = v;
- Push(WasmValue(Simd128(s)));
- return true;
- }
-
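- // Helper for the load-extend cases above: loads a 64-bit chunk and widens
- // each {narrow_type} element into a {wide_type} lane, e.g.
- // kExprI16x8Load8x8S reads 8 bytes and sign-extends each byte to a 16-bit
- // lane.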
- template <typename s_type, typename wide_type, typename narrow_type>
- bool DoSimdLoadExtend(Decoder* decoder, InterpreterCode* code, pc_t pc,
- int* const len, MachineRepresentation rep) {
- static_assert(sizeof(wide_type) == sizeof(narrow_type) * 2,
- "size mismatch for wide and narrow types");
- if (!ExecuteLoad<uint64_t, uint64_t>(decoder, code, pc, len, rep,
- /*prefix_len=*/*len - 1)) {
- return false;
- }
- constexpr int lanes = kSimd128Size / sizeof(wide_type);
- uint64_t v = Pop().to_u64();
- s_type s;
- for (int i = 0; i < lanes; i++) {
- uint8_t shift = i * (sizeof(narrow_type) * 8);
- narrow_type el = static_cast<narrow_type>(v >> shift);
- s.val[i] = static_cast<wide_type>(el);
- }
- Push(WasmValue(Simd128(s)));
- return true;
- }
-
- // Check if our control stack (frames_) exceeds the limit. Trigger a stack
- // overflow if it does, and unwind the current frame.
- // Returns true if execution can continue, false if the current activation was
- // fully unwound.
- // Do call this function immediately *after* pushing a new frame. The pc of
- // the top frame will be reset to 0 if the stack check fails.
- bool DoStackCheck() V8_WARN_UNUSED_RESULT {
- // The goal of this stack check is not to prevent actual stack overflows,
- // but to simulate stack overflows during the execution of compiled code.
- // That is why this function uses FLAG_stack_size, even though the value
- // stack actually lies in zone memory.
- const size_t stack_size_limit = FLAG_stack_size * KB;
- // Sum up the value stack size and the control stack size.
- const size_t current_stack_size = (sp_ - stack_.get()) * sizeof(*sp_) +
- frames_.size() * sizeof(frames_[0]);
- if (V8_LIKELY(current_stack_size <= stack_size_limit)) {
- return true;
- }
- // The pc of the top frame is initialized to the first instruction. We reset
- // it to 0 here such that we report the same position as in compiled code.
- frames_.back().pc = 0;
- isolate_->StackOverflow();
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
-
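- // Exception values are encoded as pairs of 16-bit halves, presumably so
- // that each half always fits into a Smi even on 32-bit targets; e.g.
- // 0xDEADBEEF is stored as the two Smis 0xDEAD and 0xBEEF.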
- void EncodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t value) {
- encoded_values->set((*encoded_index)++, Smi::FromInt(value >> 16));
- encoded_values->set((*encoded_index)++, Smi::FromInt(value & 0xffff));
- }
-
- void EncodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t value) {
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value >> 32));
- EncodeI32ExceptionValue(encoded_values, encoded_index,
- static_cast<uint32_t>(value));
- }
-
- // Allocate, initialize and throw a new exception. The exception values are
- // popped off the operand stack. Returns true if the exception is being
- // handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoThrowException(const WasmException* exception,
- uint32_t index) V8_WARN_UNUSED_RESULT {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<WasmExceptionTag> exception_tag(
- WasmExceptionTag::cast(instance_object_->exceptions_table().get(index)),
- isolate_);
- uint32_t encoded_size = WasmExceptionPackage::GetEncodedSize(exception);
- Handle<WasmExceptionPackage> exception_object =
- WasmExceptionPackage::New(isolate_, exception_tag, encoded_size);
- Handle<FixedArray> encoded_values = Handle<FixedArray>::cast(
- WasmExceptionPackage::GetExceptionValues(isolate_, exception_object));
- // Encode the exception values on the operand stack into the exception
- // package allocated above. This encoding has to be in sync with other
- // backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
- uint32_t encoded_index = 0;
- sp_t base_index = StackHeight() - sig->parameter_count();
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- WasmValue value = GetStackValue(base_index + i);
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
- uint32_t u32 = value.to_u32();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, u32);
- break;
- }
- case ValueType::kF32: {
- uint32_t f32 = value.to_f32_boxed().get_bits();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, f32);
- break;
- }
- case ValueType::kI64: {
- uint64_t u64 = value.to_u64();
- EncodeI64ExceptionValue(encoded_values, &encoded_index, u64);
- break;
- }
- case ValueType::kF64: {
- uint64_t f64 = value.to_f64_boxed().get_bits();
- EncodeI64ExceptionValue(encoded_values, &encoded_index, f64);
- break;
- }
- case ValueType::kS128: {
- int4 s128 = value.to_s128().to_i32x4();
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[0]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[1]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[2]);
- EncodeI32ExceptionValue(encoded_values, &encoded_index, s128.val[3]);
- break;
- }
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> anyref = value.to_anyref();
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
- encoded_values->set(encoded_index++, *anyref);
- break;
- }
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- }
- DCHECK_EQ(encoded_size, encoded_index);
- Drop(static_cast<int>(sig->parameter_count()));
- // Now that the exception is ready, set it as pending.
- isolate_->Throw(*exception_object);
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
-
- // Throw a given existing exception. Returns true if the exception is being
- // handled locally by the interpreter, false otherwise (interpreter exits).
- bool DoRethrowException(WasmValue exception) {
- isolate_->ReThrow(*exception.to_anyref());
- return HandleException(isolate_) == WasmInterpreter::Thread::HANDLED;
- }
-
- // Determines whether the given exception has a tag matching the expected tag
- // for the given index within the exception table of the current instance.
- bool MatchingExceptionTag(Handle<Object> exception_object, uint32_t index) {
- if (!exception_object->IsWasmExceptionPackage(isolate_)) return false;
- Handle<Object> caught_tag = WasmExceptionPackage::GetExceptionTag(
- isolate_, Handle<WasmExceptionPackage>::cast(exception_object));
- Handle<Object> expected_tag =
- handle(instance_object_->exceptions_table().get(index), isolate_);
- DCHECK(expected_tag->IsWasmExceptionTag());
- return expected_tag.is_identical_to(caught_tag);
- }
-
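- // Reassembles a 32-bit value from the two 16-bit Smi halves written by
- // EncodeI32ExceptionValue above.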
- void DecodeI32ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint32_t* value) {
- uint32_t msb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- uint32_t lsb = Smi::cast(encoded_values->get((*encoded_index)++)).value();
- *value = (msb << 16) | (lsb & 0xffff);
- }
-
- void DecodeI64ExceptionValue(Handle<FixedArray> encoded_values,
- uint32_t* encoded_index, uint64_t* value) {
- uint32_t lsb = 0, msb = 0;
- DecodeI32ExceptionValue(encoded_values, encoded_index, &msb);
- DecodeI32ExceptionValue(encoded_values, encoded_index, &lsb);
- *value = (static_cast<uint64_t>(msb) << 32) | static_cast<uint64_t>(lsb);
- }
-
- // Unpack the values encoded in the given exception. The exception values are
- // pushed onto the operand stack. Callers must perform a tag check to ensure
- // the encoded values match the expected signature of the exception.
- void DoUnpackException(const WasmException* exception,
- Handle<Object> exception_object) {
- Handle<FixedArray> encoded_values =
- Handle<FixedArray>::cast(WasmExceptionPackage::GetExceptionValues(
- isolate_, Handle<WasmExceptionPackage>::cast(exception_object)));
- // Decode the exception values from the given exception package and push
- // them onto the operand stack. This encoding has to be in sync with other
- // backends so that exceptions can be passed between them.
- const WasmExceptionSig* sig = exception->sig;
- uint32_t encoded_index = 0;
- for (size_t i = 0; i < sig->parameter_count(); ++i) {
- WasmValue value;
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32: {
- uint32_t u32 = 0;
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &u32);
- value = WasmValue(u32);
- break;
- }
- case ValueType::kF32: {
- uint32_t f32_bits = 0;
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &f32_bits);
- value = WasmValue(Float32::FromBits(f32_bits));
- break;
- }
- case ValueType::kI64: {
- uint64_t u64 = 0;
- DecodeI64ExceptionValue(encoded_values, &encoded_index, &u64);
- value = WasmValue(u64);
- break;
- }
- case ValueType::kF64: {
- uint64_t f64_bits = 0;
- DecodeI64ExceptionValue(encoded_values, &encoded_index, &f64_bits);
- value = WasmValue(Float64::FromBits(f64_bits));
- break;
- }
- case ValueType::kS128: {
- int4 s128 = {0, 0, 0, 0};
- uint32_t* vals = reinterpret_cast<uint32_t*>(s128.val);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[0]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[1]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[2]);
- DecodeI32ExceptionValue(encoded_values, &encoded_index, &vals[3]);
- value = WasmValue(Simd128(s128));
- break;
- }
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> anyref(encoded_values->get(encoded_index++), isolate_);
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef, anyref->IsNull());
- value = WasmValue(anyref);
- break;
- }
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- Push(value);
- }
- DCHECK_EQ(WasmExceptionPackage::GetEncodedSize(exception), encoded_index);
- }
-
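- // Main dispatch loop of the interpreter: decodes one opcode at a time from
- // the (possibly breakpoint-patched) code copy and executes it against the
- // value stack. {max} limits the number of breakable instructions executed,
- // which implements stepping; a negative {max} means no limit.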
- void Execute(InterpreterCode* code, pc_t pc, int max) {
- DCHECK_NOT_NULL(code->side_table);
- DCHECK(!frames_.empty());
- // There must be enough space on the stack to hold the arguments, locals,
- // and the value stack.
- DCHECK_LE(code->function->sig->parameter_count() +
- code->locals.type_list.size() +
- code->side_table->max_stack_height_,
- stack_limit_ - stack_.get() - frames_.back().sp);
- // Seal the surrounding {HandleScope} to ensure that all cases within the
- // interpreter switch below which deal with handles open their own scope.
- // This avoids leaking / accumulating handles in the surrounding scope.
- SealHandleScope shs(isolate_);
-
- Decoder decoder(code->start, code->end);
- pc_t limit = code->end - code->start;
- bool hit_break = false;
-
- while (true) {
-#define PAUSE_IF_BREAK_FLAG(flag) \
- if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) { \
- hit_break = true; \
- max = 0; \
- }
-
- DCHECK_GT(limit, pc);
- DCHECK_NOT_NULL(code->start);
-
- // First check for a breakpoint, in order to set hit_break correctly.
- const char* skip = " ";
- int len = 1;
- // We need to store this, because SIMD opcodes are LEB encoded, and later
- // on when executing, we need to know where to read immediates from.
- uint32_t simd_opcode_length = 0;
- byte orig = code->start[pc];
- WasmOpcode opcode = static_cast<WasmOpcode>(orig);
- if (WasmOpcodes::IsPrefixOpcode(opcode)) {
- opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
- &code->start[pc], &simd_opcode_length);
- len += simd_opcode_length;
- }
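- // Breakpoints are set by patching kInternalBreakpoint into a private copy
- // of the code (see WasmInterpreter::SetBreakpoint); the original byte is
- // still available in code->orig_start.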
- if (V8_UNLIKELY(orig == kInternalBreakpoint)) {
- orig = code->orig_start[pc];
- if (WasmOpcodes::IsPrefixOpcode(static_cast<WasmOpcode>(orig))) {
- opcode = decoder.read_prefixed_opcode<Decoder::kNoValidate>(
- &code->start[pc]);
- }
- if (SkipBreakpoint(code, pc)) {
- // Skip the breakpoint by dispatching on the original code below.
- skip = "[skip] ";
- } else {
- TRACE("@%-3zu: [break] %-24s:", pc, WasmOpcodes::OpcodeName(opcode));
- TraceValueStack();
- TRACE("\n");
- hit_break = true;
- break;
- }
- }
-
- // If max is 0, break. If max is positive (a limit is set), decrement it.
- if (max >= 0 && WasmOpcodes::IsBreakable(opcode)) {
- if (max == 0) break;
- --max;
- }
-
- USE(skip);
- TRACE("@%-3zu: %s%-24s:", pc, skip, WasmOpcodes::OpcodeName(opcode));
- TraceValueStack();
- TRACE("\n");
-
-#ifdef DEBUG
- // Compute the stack effect of this opcode, and verify later that the
- // stack was modified accordingly.
- std::pair<uint32_t, uint32_t> stack_effect =
- StackEffect(codemap_->module(), frames_.back().code->function->sig,
- code->orig_start + pc, code->orig_end);
- sp_t expected_new_stack_height =
- StackHeight() - stack_effect.first + stack_effect.second;
-#endif
-
- switch (orig) {
- case kExprNop:
- break;
- case kExprBlock:
- case kExprLoop:
- case kExprTry: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- len = 1 + imm.length;
- break;
- }
- case kExprIf: {
- BlockTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- WasmValue cond = Pop();
- bool is_true = cond.to<uint32_t>() != 0;
- if (is_true) {
- // fall through to the true block.
- len = 1 + imm.length;
- TRACE(" true => fallthrough\n");
- } else {
- len = LookupTargetDelta(code, pc);
- TRACE(" false => @%zu\n", pc + len);
- }
- break;
- }
- case kExprElse:
- case kExprCatch: {
- len = LookupTargetDelta(code, pc);
- TRACE(" end => @%zu\n", pc + len);
- break;
- }
- case kExprThrow: {
- ExceptionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- CommitPc(pc); // Needed for local unwinding.
- const WasmException* exception = &module()->exceptions[imm.index];
- if (!DoThrowException(exception, imm.index)) return;
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- case kExprRethrow: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue ex = Pop();
- if (ex.to_anyref()->IsNull()) return DoTrap(kTrapRethrowNullRef, pc);
- CommitPc(pc); // Needed for local unwinding.
- if (!DoRethrowException(ex)) return;
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- case kExprBrOnExn: {
- BranchOnExceptionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue ex = Pop();
- Handle<Object> exception = ex.to_anyref();
- if (exception->IsNull()) return DoTrap(kTrapBrOnExnNullRef, pc);
- if (MatchingExceptionTag(exception, imm.index.index)) {
- imm.index.exception = &module()->exceptions[imm.index.index];
- DoUnpackException(imm.index.exception, exception);
- len = DoBreak(code, pc, imm.depth.depth);
- TRACE(" match => @%zu\n", pc + len);
- } else {
- Push(ex); // Exception remains on stack.
- TRACE(" false => fallthrough\n");
- len = 1 + imm.length;
- }
- break;
- }
- case kExprSelectWithType: {
- SelectTypeImmediate<Decoder::kNoValidate> imm(WasmFeatures::All(),
- &decoder, code->at(pc));
- len = 1 + imm.length;
- V8_FALLTHROUGH;
- }
- case kExprSelect: {
- HandleScope scope(isolate_); // Avoid leaking handles.
- WasmValue cond = Pop();
- WasmValue fval = Pop();
- WasmValue tval = Pop();
- Push(cond.to<int32_t>() != 0 ? tval : fval);
- break;
- }
- case kExprBr: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- len = DoBreak(code, pc, imm.depth);
- TRACE(" br => @%zu\n", pc + len);
- break;
- }
- case kExprBrIf: {
- BranchDepthImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- WasmValue cond = Pop();
- bool is_true = cond.to<uint32_t>() != 0;
- if (is_true) {
- len = DoBreak(code, pc, imm.depth);
- TRACE(" br_if => @%zu\n", pc + len);
- } else {
- TRACE(" false => fallthrough\n");
- len = 1 + imm.length;
- }
- break;
- }
- case kExprBrTable: {
- BranchTableImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- BranchTableIterator<Decoder::kNoValidate> iterator(&decoder, imm);
- uint32_t key = Pop().to<uint32_t>();
- uint32_t depth = 0;
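- // An out-of-range key selects the default target, which is encoded as the
- // last (table_count-th) entry; walk the iterator to find the depth of the
- // selected entry.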
- if (key >= imm.table_count) key = imm.table_count;
- for (uint32_t i = 0; i <= key; i++) {
- DCHECK(iterator.has_next());
- depth = iterator.next();
- }
- len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
- TRACE(" br[%u] => @%zu\n", key, pc + key + len);
- break;
- }
- case kExprReturn: {
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- continue; // Do not bump pc.
- }
- case kExprUnreachable: {
- return DoTrap(kTrapUnreachable, pc);
- }
- case kExprEnd: {
- break;
- }
- case kExprI32Const: {
- ImmI32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprI64Const: {
- ImmI64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprF32Const: {
- ImmF32Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprF64Const: {
- ImmF64Immediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- Push(WasmValue(imm.value));
- len = 1 + imm.length;
- break;
- }
- case kExprRefNull: {
- Push(WasmValue(isolate_->factory()->null_value()));
- break;
- }
- case kExprRefFunc: {
- FunctionIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
-
- Handle<WasmExternalFunction> function =
- WasmInstanceObject::GetOrCreateWasmExternalFunction(
- isolate_, instance_object_, imm.index);
- Push(WasmValue(function));
- len = 1 + imm.length;
- break;
- }
- case kExprLocalGet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Push(GetStackValue(frames_.back().sp + imm.index));
- len = 1 + imm.length;
- break;
- }
- case kExprLocalSet: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue val = Pop();
- SetStackValue(frames_.back().sp + imm.index, val);
- len = 1 + imm.length;
- break;
- }
- case kExprLocalTee: {
- LocalIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- WasmValue val = Pop();
- SetStackValue(frames_.back().sp + imm.index, val);
- Push(val);
- len = 1 + imm.length;
- break;
- }
- case kExprDrop: {
- Drop();
- break;
- }
- case kExprCallFunction: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- InterpreterCode* target = codemap()->GetCode(imm.index);
- if (target->function->imported) {
- CommitPc(pc);
- ExternalCallResult result =
- CallImportedFunction(target->function->func_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // The import is a function of this instance. Call it directly.
- DCHECK(!result.interpreter_code->function->imported);
- break;
- case ExternalCallResult::INVALID_FUNC:
- case ExternalCallResult::SIGNATURE_MISMATCH:
- // Direct calls are checked statically.
- UNREACHABLE();
- case ExternalCallResult::EXTERNAL_RETURNED:
- PAUSE_IF_BREAK_FLAG(AfterCall);
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- if (result.type != ExternalCallResult::INTERNAL) break;
- }
- // Execute an internal call.
- if (!DoCall(&decoder, target, &pc, &limit)) return;
- code = target;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- } break;
-
- case kExprCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
- WasmFeatures::All(), &decoder, code->at(pc));
- uint32_t entry_index = Pop().to<uint32_t>();
- CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
- ExternalCallResult result =
- CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // The target is a function of this instance. Call it directly.
- if (!DoCall(&decoder, result.interpreter_code, &pc, &limit))
- return;
- code = result.interpreter_code;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- case ExternalCallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
- case ExternalCallResult::SIGNATURE_MISMATCH:
- return DoTrap(kTrapFuncSigMismatch, pc);
- case ExternalCallResult::EXTERNAL_RETURNED:
- PAUSE_IF_BREAK_FLAG(AfterCall);
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue; // Do not bump pc.
- }
- } break;
-
- case kExprReturnCall: {
- CallFunctionImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- InterpreterCode* target = codemap()->GetCode(imm.index);
-
- if (!target->function->imported) {
- // The callee was found internally; enter it directly.
- if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
- code = target;
- PAUSE_IF_BREAK_FLAG(AfterCall);
-
- continue; // Do not bump pc.
- }
- // Function is imported.
- CommitPc(pc);
- ExternalCallResult result =
- CallImportedFunction(target->function->func_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL:
- // Cannot import internal functions.
- case ExternalCallResult::INVALID_FUNC:
- case ExternalCallResult::SIGNATURE_MISMATCH:
- // Direct calls are checked statically.
- UNREACHABLE();
- case ExternalCallResult::EXTERNAL_RETURNED:
- len = 1 + imm.length;
- break;
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- continue;
- }
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- continue;
- } break;
-
- case kExprReturnCallIndirect: {
- CallIndirectImmediate<Decoder::kNoValidate> imm(
- WasmFeatures::All(), &decoder, code->at(pc));
- uint32_t entry_index = Pop().to<uint32_t>();
- CommitPc(pc); // TODO(wasm): Be more disciplined about committing PC.
-
- // TODO(wasm): Calling functions needs some refactoring to avoid
- // multi-exit code like this.
- ExternalCallResult result =
- CallIndirectFunction(imm.table_index, entry_index, imm.sig_index);
- switch (result.type) {
- case ExternalCallResult::INTERNAL: {
- InterpreterCode* target = result.interpreter_code;
-
- DCHECK(!target->function->imported);
-
- // The function belongs to this instance. Enter it directly.
- if (!DoReturnCall(&decoder, target, &pc, &limit)) return;
- code = result.interpreter_code;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- continue; // Do not bump pc.
- }
- case ExternalCallResult::INVALID_FUNC:
- return DoTrap(kTrapFuncInvalid, pc);
- case ExternalCallResult::SIGNATURE_MISMATCH:
- return DoTrap(kTrapFuncSigMismatch, pc);
- case ExternalCallResult::EXTERNAL_RETURNED: {
- len = 1 + imm.length;
-
- size_t arity = code->function->sig->return_count();
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterCall);
- break;
- }
- case ExternalCallResult::EXTERNAL_UNWOUND:
- return;
-
- case ExternalCallResult::EXTERNAL_CAUGHT:
- ReloadFromFrameOnException(&decoder, &code, &pc, &limit);
- break;
- }
- } break;
-
- case kExprGlobalGet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- HandleScope handle_scope(isolate_);
- Push(WasmInstanceObject::GetGlobalValue(
- instance_object_, module()->globals[imm.index]));
- len = 1 + imm.length;
- break;
- }
- case kExprGlobalSet: {
- GlobalIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- auto& global = module()->globals[imm.index];
- switch (global.type.kind()) {
-#define CASE_TYPE(valuetype, ctype) \
- case ValueType::valuetype: { \
- uint8_t* ptr = \
- WasmInstanceObject::GetGlobalStorage(instance_object_, global); \
- WriteLittleEndianValue<ctype>(reinterpret_cast<Address>(ptr), \
- Pop().to<ctype>()); \
- break; \
- }
- FOREACH_WASMVALUE_CTYPES(CASE_TYPE)
-#undef CASE_TYPE
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef: {
- // TODO(7748): Type checks or DCHECKs for ref types?
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<FixedArray> global_buffer; // The buffer of the global.
- uint32_t global_index; // The index into the buffer.
- std::tie(global_buffer, global_index) =
- WasmInstanceObject::GetGlobalBufferAndIndex(instance_object_,
- global);
- Handle<Object> ref = Pop().to_anyref();
- DCHECK_IMPLIES(global.type == kWasmNullRef, ref->IsNull());
- global_buffer->set(global_index, *ref);
- break;
- }
- case ValueType::kStmt:
- case ValueType::kBottom:
- UNREACHABLE();
- }
- len = 1 + imm.length;
- break;
- }
- case kExprTableGet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- uint32_t entry_index = Pop().to<uint32_t>();
- if (entry_index >= table_size) {
- return DoTrap(kTrapTableOutOfBounds, pc);
- }
- Handle<Object> value =
- WasmTableObject::Get(isolate_, table, entry_index);
- Push(WasmValue(value));
- len = 1 + imm.length;
- break;
- }
- case kExprTableSet: {
- TableIndexImmediate<Decoder::kNoValidate> imm(&decoder, code->at(pc));
- HandleScope handle_scope(isolate_);
- auto table = handle(
- WasmTableObject::cast(instance_object_->tables().get(imm.index)),
- isolate_);
- uint32_t table_size = table->current_length();
- Handle<Object> value = Pop().to_anyref();
- uint32_t entry_index = Pop().to<uint32_t>();
- if (entry_index >= table_size) {
- return DoTrap(kTrapTableOutOfBounds, pc);
- }
- WasmTableObject::Set(isolate_, table, entry_index, value);
- len = 1 + imm.length;
- break;
- }
-#define LOAD_CASE(name, ctype, mtype, rep) \
- case kExpr##name: { \
- if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, &len, \
- MachineRepresentation::rep)) \
- return; \
- break; \
- }
-
- LOAD_CASE(I32LoadMem8S, int32_t, int8_t, kWord8);
- LOAD_CASE(I32LoadMem8U, int32_t, uint8_t, kWord8);
- LOAD_CASE(I32LoadMem16S, int32_t, int16_t, kWord16);
- LOAD_CASE(I32LoadMem16U, int32_t, uint16_t, kWord16);
- LOAD_CASE(I64LoadMem8S, int64_t, int8_t, kWord8);
- LOAD_CASE(I64LoadMem8U, int64_t, uint8_t, kWord8);
- LOAD_CASE(I64LoadMem16S, int64_t, int16_t, kWord16);
- LOAD_CASE(I64LoadMem16U, int64_t, uint16_t, kWord16);
- LOAD_CASE(I64LoadMem32S, int64_t, int32_t, kWord32);
- LOAD_CASE(I64LoadMem32U, int64_t, uint32_t, kWord32);
- LOAD_CASE(I32LoadMem, int32_t, int32_t, kWord32);
- LOAD_CASE(I64LoadMem, int64_t, int64_t, kWord64);
- LOAD_CASE(F32LoadMem, Float32, uint32_t, kFloat32);
- LOAD_CASE(F64LoadMem, Float64, uint64_t, kFloat64);
-#undef LOAD_CASE
-
-#define STORE_CASE(name, ctype, mtype, rep) \
- case kExpr##name: { \
- if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, &len, \
- MachineRepresentation::rep)) \
- return; \
- break; \
- }
-
- STORE_CASE(I32StoreMem8, int32_t, int8_t, kWord8);
- STORE_CASE(I32StoreMem16, int32_t, int16_t, kWord16);
- STORE_CASE(I64StoreMem8, int64_t, int8_t, kWord8);
- STORE_CASE(I64StoreMem16, int64_t, int16_t, kWord16);
- STORE_CASE(I64StoreMem32, int64_t, int32_t, kWord32);
- STORE_CASE(I32StoreMem, int32_t, int32_t, kWord32);
- STORE_CASE(I64StoreMem, int64_t, int64_t, kWord64);
- STORE_CASE(F32StoreMem, Float32, uint32_t, kFloat32);
- STORE_CASE(F64StoreMem, Float64, uint64_t, kFloat64);
-#undef STORE_CASE
-
-#define ASMJS_LOAD_CASE(name, ctype, mtype, defval) \
- case kExpr##name: { \
- uint32_t index = Pop().to<uint32_t>(); \
- ctype result; \
- Address addr = BoundsCheckMem<mtype>(0, index); \
- if (!addr) { \
- result = defval; \
- } else { \
- /* TODO(titzer): alignment for asmjs load mem? */ \
- result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
- } \
- Push(WasmValue(result)); \
- break; \
- }
- ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
- ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
- ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
- std::numeric_limits<float>::quiet_NaN());
- ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
- std::numeric_limits<double>::quiet_NaN());
-#undef ASMJS_LOAD_CASE
-
-#define ASMJS_STORE_CASE(name, ctype, mtype) \
- case kExpr##name: { \
- WasmValue val = Pop(); \
- uint32_t index = Pop().to<uint32_t>(); \
- Address addr = BoundsCheckMem<mtype>(0, index); \
- if (addr) { \
- *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
- } \
- Push(val); \
- break; \
- }
-
- ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
- ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
- ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
- ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
- ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
-#undef ASMJS_STORE_CASE
- case kExprMemoryGrow: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- uint32_t delta_pages = Pop().to<uint32_t>();
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<WasmMemoryObject> memory(instance_object_->memory_object(),
- isolate_);
- int32_t result =
- WasmMemoryObject::Grow(isolate_, memory, delta_pages);
- Push(WasmValue(result));
- len = 1 + imm.length;
- // Treat one grow_memory instruction like 1000 other instructions,
- // because it is a really expensive operation.
- if (max > 0) max = std::max(0, max - 1000);
- break;
- }
- case kExprMemorySize: {
- MemoryIndexImmediate<Decoder::kNoValidate> imm(&decoder,
- code->at(pc));
- Push(WasmValue(static_cast<uint32_t>(instance_object_->memory_size() /
- kWasmPageSize)));
- len = 1 + imm.length;
- break;
- }
- // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
- // specially to guarantee that the quiet bit of a NaN is preserved on
- // ia32 by the reinterpret casts.
- case kExprI32ReinterpretF32: {
- WasmValue val = Pop();
- Push(WasmValue(ExecuteI32ReinterpretF32(val)));
- break;
- }
- case kExprI64ReinterpretF64: {
- WasmValue val = Pop();
- Push(WasmValue(ExecuteI64ReinterpretF64(val)));
- break;
- }
-#define SIGN_EXTENSION_CASE(name, wtype, ntype) \
- case kExpr##name: { \
- ntype val = static_cast<ntype>(Pop().to<wtype>()); \
- Push(WasmValue(static_cast<wtype>(val))); \
- break; \
- }
- SIGN_EXTENSION_CASE(I32SExtendI8, int32_t, int8_t);
- SIGN_EXTENSION_CASE(I32SExtendI16, int32_t, int16_t);
- SIGN_EXTENSION_CASE(I64SExtendI8, int64_t, int8_t);
- SIGN_EXTENSION_CASE(I64SExtendI16, int64_t, int16_t);
- SIGN_EXTENSION_CASE(I64SExtendI32, int64_t, int32_t);
-#undef SIGN_EXTENSION_CASE
- case kExprRefIsNull: {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t result = Pop().to_anyref()->IsNull() ? 1 : 0;
- Push(WasmValue(result));
- break;
- }
- case kNumericPrefix: {
- if (!ExecuteNumericOp(opcode, &decoder, code, pc, &len)) return;
- break;
- }
- case kAtomicPrefix: {
- if (!ExecuteAtomicOp(opcode, &decoder, code, pc, &len)) return;
- break;
- }
- case kSimdPrefix: {
- if (!ExecuteSimdOp(opcode, &decoder, code, pc, &len,
- simd_opcode_length))
- return;
- break;
- }
-
-#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
- case kExpr##name: { \
- WasmValue rval = Pop(); \
- WasmValue lval = Pop(); \
- auto result = lval.to<ctype>() op rval.to<ctype>(); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- Push(WasmValue(result)); \
- break; \
- }
- FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
-#undef EXECUTE_SIMPLE_BINOP
-
-#define EXECUTE_OTHER_BINOP(name, ctype) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- ctype rval = Pop().to<ctype>(); \
- ctype lval = Pop().to<ctype>(); \
- auto result = Execute##name(lval, rval, &trap); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(WasmValue(result)); \
- break; \
- }
- FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
-#undef EXECUTE_OTHER_BINOP
-
-#define EXECUTE_UNOP(name, ctype, exec_fn) \
- case kExpr##name: { \
- TrapReason trap = kTrapCount; \
- ctype val = Pop().to<ctype>(); \
- auto result = exec_fn(val, &trap); \
- possible_nondeterminism_ |= has_nondeterminism(result); \
- if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(WasmValue(result)); \
- break; \
- }
-
-#define EXECUTE_OTHER_UNOP(name, ctype) EXECUTE_UNOP(name, ctype, Execute##name)
- FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
-#undef EXECUTE_OTHER_UNOP
-
-#define EXECUTE_I32CONV_FLOATOP(name, out_type, in_type) \
- EXECUTE_UNOP(name, in_type, ExecuteConvert<out_type>)
- FOREACH_I32CONV_FLOATOP(EXECUTE_I32CONV_FLOATOP)
-#undef EXECUTE_I32CONV_FLOATOP
-#undef EXECUTE_UNOP
-
- default:
- FATAL("Unknown or unimplemented opcode #%d:%s", code->start[pc],
- OpcodeName(code->start[pc]));
- UNREACHABLE();
- }
-
-#ifdef DEBUG
- if (!WasmOpcodes::IsControlOpcode(opcode)) {
- DCHECK_EQ(expected_new_stack_height, StackHeight());
- }
-#endif
-
- pc += len;
- if (pc == limit) {
- // Fell off end of code; do an implicit return.
- TRACE("@%-3zu: ImplicitReturn\n", pc);
- size_t arity = code->function->sig->return_count();
- DCHECK_EQ(StackHeight() - arity, frames_.back().llimit());
- if (!DoReturn(&decoder, &code, &pc, &limit, arity)) return;
- PAUSE_IF_BREAK_FLAG(AfterReturn);
- }
-#undef PAUSE_IF_BREAK_FLAG
- }
-
- state_ = WasmInterpreter::PAUSED;
- break_pc_ = hit_break ? pc : kInvalidPc;
- CommitPc(pc);
- }
-
- WasmValue Pop() {
- DCHECK_GT(frames_.size(), 0);
- DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
- StackValue stack_value = *--sp_;
- // Note that {StackHeight} depends on the current {sp} value, hence this
- // operation is split into two statements to ensure proper evaluation order.
- WasmValue val = stack_value.ExtractValue(this, StackHeight());
- stack_value.ClearValue(this, StackHeight());
- return val;
- }
-
- void Drop(int n = 1) {
- DCHECK_GE(StackHeight(), n);
- DCHECK_GT(frames_.size(), 0);
- // Check that we don't pop into locals.
- DCHECK_GE(StackHeight() - n, frames_.back().llimit());
- StackValue::ClearValues(this, StackHeight() - n, n);
- sp_ -= n;
- }
-
- WasmValue PopArity(size_t arity) {
- if (arity == 0) return WasmValue();
- CHECK_EQ(1, arity);
- return Pop();
- }
-
- void Push(WasmValue val) {
- DCHECK_NE(kWasmStmt, val.type());
- DCHECK_LE(1, stack_limit_ - sp_);
- DCHECK(StackValue::IsClearedValue(this, StackHeight()));
- StackValue stack_value(val, this, StackHeight());
- // Note that {StackHeight} depends on the current {sp} value, hence this
- // operation is split into two statements to ensure proper evaluation order.
- *sp_++ = stack_value;
- }
-
- void Push(WasmValue* vals, size_t arity) {
- DCHECK_LE(arity, stack_limit_ - sp_);
- for (WasmValue *val = vals, *end = vals + arity; val != end; ++val) {
- DCHECK_NE(kWasmStmt, val->type());
- Push(*val);
- }
- }
-
- void ResetStack(sp_t new_height) {
- DCHECK_LE(new_height, StackHeight()); // Only allowed to shrink.
- int count = static_cast<int>(StackHeight() - new_height);
- StackValue::ClearValues(this, new_height, count);
- sp_ = stack_.get() + new_height;
- }
-
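- // Grows the value stack to at least double its previous size (keeping
- // pushes amortized O(1)) and grows the on-heap reference stack by the same
- // amount, so value slots and reference slots stay index-aligned.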
- void EnsureStackSpace(size_t size) {
- if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
- size_t old_size = stack_limit_ - stack_.get();
- size_t requested_size =
- base::bits::RoundUpToPowerOfTwo64((sp_ - stack_.get()) + size);
- size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
- std::unique_ptr<StackValue[]> new_stack(new StackValue[new_size]);
- if (old_size > 0) {
- memcpy(new_stack.get(), stack_.get(), old_size * sizeof(*sp_));
- }
- sp_ = new_stack.get() + (sp_ - stack_.get());
- stack_ = std::move(new_stack);
- stack_limit_ = stack_.get() + new_size;
- // Also resize the reference stack to the same size.
- int grow_by = static_cast<int>(new_size - old_size);
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Handle<FixedArray> old_ref_stack(reference_stack(), isolate_);
- Handle<FixedArray> new_ref_stack =
- isolate_->factory()->CopyFixedArrayAndGrow(old_ref_stack, grow_by);
- new_ref_stack->FillWithHoles(static_cast<int>(old_size),
- static_cast<int>(new_size));
- reference_stack_cell_->set_value(*new_ref_stack);
- }
-
- sp_t StackHeight() { return sp_ - stack_.get(); }
-
- void TraceValueStack() {
-#ifdef DEBUG
- if (!FLAG_trace_wasm_interpreter) return;
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
- sp_t sp = top ? top->sp : 0;
- sp_t plimit = top ? top->plimit() : 0;
- sp_t llimit = top ? top->llimit() : 0;
- for (size_t i = sp; i < StackHeight(); ++i) {
- if (i < plimit) {
- PrintF(" p%zu:", i);
- } else if (i < llimit) {
- PrintF(" l%zu:", i);
- } else {
- PrintF(" s%zu:", i);
- }
- WasmValue val = GetStackValue(i);
- switch (val.type().kind()) {
- case ValueType::kI32:
- PrintF("i32:%d", val.to<int32_t>());
- break;
- case ValueType::kI64:
- PrintF("i64:%" PRId64 "", val.to<int64_t>());
- break;
- case ValueType::kF32:
- PrintF("f32:%f", val.to<float>());
- break;
- case ValueType::kF64:
- PrintF("f64:%lf", val.to<double>());
- break;
- case ValueType::kS128: {
- // This defaults to tracing all S128 values as i32x4 values for now; once
- // there is more state to know what type of values are on the stack, the
- // right format should be printed here.
- int4 s = val.to_s128().to_i32x4();
- PrintF("i32x4:%d,%d,%d,%d", s.val[0], s.val[1], s.val[2], s.val[3]);
- break;
- }
- case ValueType::kAnyRef: {
- Handle<Object> ref = val.to_anyref();
- if (ref->IsNull()) {
- PrintF("ref:null");
- } else {
- PrintF("ref:0x%" V8PRIxPTR, ref->ptr());
- }
- break;
- }
- case ValueType::kStmt:
- PrintF("void");
- break;
- case ValueType::kFuncRef:
- case ValueType::kExnRef:
- case ValueType::kNullRef:
- case ValueType::kRef:
- case ValueType::kOptRef:
- case ValueType::kEqRef:
- PrintF("(func|null|exn|opt|eq|)ref:unimplemented");
- break;
- case ValueType::kBottom:
- UNREACHABLE();
- break;
- }
- }
-#endif // DEBUG
- }
-
- ExternalCallResult TryHandleException(Isolate* isolate) {
- DCHECK(isolate->has_pending_exception()); // Assume exceptional return.
- if (HandleException(isolate) == WasmInterpreter::Thread::UNWOUND) {
- return {ExternalCallResult::EXTERNAL_UNWOUND};
- }
- return {ExternalCallResult::EXTERNAL_CAUGHT};
- }
-
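- // Calls a non-interpreted wasm function through the generic C-wasm entry
- // stub: arguments are packed into a single buffer, the stub unpacks them
- // according to {sig}, and return values are read back from the same buffer.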
- ExternalCallResult CallExternalWasmFunction(Isolate* isolate,
- Handle<Object> object_ref,
- const WasmCode* code,
- const FunctionSig* sig) {
- int num_args = static_cast<int>(sig->parameter_count());
- WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
-
- if (code->kind() == WasmCode::kWasmToJsWrapper &&
- !IsJSCompatibleSignature(sig, enabled_features)) {
- Drop(num_args); // Pop arguments before throwing.
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kWasmTrapTypeError));
- return TryHandleException(isolate);
- }
-
- Handle<WasmDebugInfo> debug_info(instance_object_->debug_info(), isolate);
- Handle<Code> wasm_entry = WasmDebugInfo::GetCWasmEntry(debug_info, sig);
-
- TRACE(" => Calling external wasm function\n");
-
- // Copy the arguments to one buffer.
- CWasmArgumentsPacker packer(CWasmArgumentsPacker::TotalSize(sig));
- sp_t base_index = StackHeight() - num_args;
- for (int i = 0; i < num_args; ++i) {
- WasmValue arg = GetStackValue(base_index + i);
- switch (sig->GetParam(i).kind()) {
- case ValueType::kI32:
- packer.Push(arg.to<uint32_t>());
- break;
- case ValueType::kI64:
- packer.Push(arg.to<uint64_t>());
- break;
- case ValueType::kF32:
- packer.Push(arg.to<float>());
- break;
- case ValueType::kF64:
- packer.Push(arg.to<double>());
- break;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef:
- DCHECK_IMPLIES(sig->GetParam(i) == kWasmNullRef,
- arg.to_anyref()->IsNull());
- packer.Push(arg.to_anyref()->ptr());
- break;
- default:
- UNIMPLEMENTED();
- }
- }
-
- Address call_target = code->instruction_start();
- Execution::CallWasm(isolate, wasm_entry, call_target, object_ref,
- packer.argv());
- TRACE(" => External wasm function returned%s\n",
- isolate->has_pending_exception() ? " with exception" : "");
-
- // Pop arguments off the stack.
- Drop(num_args);
-
- if (isolate->has_pending_exception()) {
- return TryHandleException(isolate);
- }
-
- // Push return values.
- packer.Reset();
- for (size_t i = 0; i < sig->return_count(); i++) {
- switch (sig->GetReturn(i).kind()) {
- case ValueType::kI32:
- Push(WasmValue(packer.Pop<uint32_t>()));
- break;
- case ValueType::kI64:
- Push(WasmValue(packer.Pop<uint64_t>()));
- break;
- case ValueType::kF32:
- Push(WasmValue(packer.Pop<float>()));
- break;
- case ValueType::kF64:
- Push(WasmValue(packer.Pop<double>()));
- break;
- case ValueType::kAnyRef:
- case ValueType::kFuncRef:
- case ValueType::kNullRef:
- case ValueType::kExnRef: {
- Handle<Object> ref(Object(packer.Pop<Address>()), isolate);
- DCHECK_IMPLIES(sig->GetReturn(i) == kWasmNullRef, ref->IsNull());
- Push(WasmValue(ref));
- break;
- }
- default:
- UNIMPLEMENTED();
- }
- }
- return {ExternalCallResult::EXTERNAL_RETURNED};
- }
-
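- // Resolves a call target address to its WasmCode object. If the target
- // still points at a jump table slot, the callee has not been compiled yet
- // and is compiled lazily here.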
- static WasmCode* GetTargetCode(Isolate* isolate, Address target) {
- WasmCodeManager* code_manager = isolate->wasm_engine()->code_manager();
- NativeModule* native_module = code_manager->LookupNativeModule(target);
- WasmCode* code = native_module->Lookup(target);
- if (code->kind() == WasmCode::kJumpTable) {
- uint32_t func_index =
- native_module->GetFunctionIndexFromJumpTableSlot(target);
-
- if (!native_module->HasCode(func_index)) {
- bool success = CompileLazy(isolate, native_module, func_index);
- if (!success) {
- DCHECK(isolate->has_pending_exception());
- return nullptr;
- }
- }
-
- return native_module->GetCode(func_index);
- }
- DCHECK_EQ(code->instruction_start(), target);
- return code;
- }
-
- ExternalCallResult CallImportedFunction(uint32_t function_index) {
- DCHECK_GT(module()->num_imported_functions, function_index);
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
-
- ImportedFunctionEntry entry(instance_object_, function_index);
- Handle<Object> object_ref(entry.object_ref(), isolate_);
- WasmCode* code = GetTargetCode(isolate_, entry.target());
-
- // In case a function's body is invalid and the function is lazily validated
- // and compiled, we may get an exception.
- if (code == nullptr) return TryHandleException(isolate_);
-
- const FunctionSig* sig = module()->functions[function_index].sig;
- return CallExternalWasmFunction(isolate_, object_ref, code, sig);
- }
-
- ExternalCallResult CallIndirectFunction(uint32_t table_index,
- uint32_t entry_index,
- uint32_t sig_index) {
- HandleScope handle_scope(isolate_); // Avoid leaking handles.
- uint32_t expected_sig_id = module()->signature_ids[sig_index];
- DCHECK_EQ(expected_sig_id,
- module()->signature_map.Find(*module()->signature(sig_index)));
- // Bounds check against table size.
- if (entry_index >=
- static_cast<uint32_t>(WasmInstanceObject::IndirectFunctionTableSize(
- isolate_, instance_object_, table_index))) {
- return {ExternalCallResult::INVALID_FUNC};
- }
-
- IndirectFunctionTableEntry entry(instance_object_, table_index,
- entry_index);
- // Signature check.
- if (entry.sig_id() != static_cast<int32_t>(expected_sig_id)) {
- return {ExternalCallResult::SIGNATURE_MISMATCH};
- }
-
- const FunctionSig* signature = module()->signature(sig_index);
- Handle<Object> object_ref = handle(entry.object_ref(), isolate_);
- WasmCode* code = GetTargetCode(isolate_, entry.target());
-
- // In case a function's body is invalid and the function is lazily validated
- // and compiled, we may get an exception.
- if (code == nullptr) return TryHandleException(isolate_);
-
- if (!object_ref->IsWasmInstanceObject() || /* call to an import */
- !instance_object_.is_identical_to(object_ref) /* cross-instance */) {
- return CallExternalWasmFunction(isolate_, object_ref, code, signature);
- }
-
- DCHECK_EQ(WasmCode::kFunction, code->kind());
- return {ExternalCallResult::INTERNAL, codemap()->GetCode(code->index())};
- }
-
- inline Activation current_activation() {
- return activations_.empty() ? Activation(0, 0) : activations_.back();
- }
-};
-
-class InterpretedFrameImpl {
- public:
- InterpretedFrameImpl(ThreadImpl* thread, int index)
- : thread_(thread), index_(index) {
- DCHECK_LE(0, index);
- }
-
- const WasmFunction* function() const { return frame()->code->function; }
-
- int pc() const {
- DCHECK_LE(0, frame()->pc);
- DCHECK_GE(kMaxInt, frame()->pc);
- return static_cast<int>(frame()->pc);
- }
-
- int GetParameterCount() const {
- DCHECK_GE(kMaxInt, function()->sig->parameter_count());
- return static_cast<int>(function()->sig->parameter_count());
- }
-
- int GetLocalCount() const {
- size_t num_locals = function()->sig->parameter_count() +
- frame()->code->locals.type_list.size();
- DCHECK_GE(kMaxInt, num_locals);
- return static_cast<int>(num_locals);
- }
-
- int GetStackHeight() const {
- bool is_top_frame =
- static_cast<size_t>(index_) + 1 == thread_->frames_.size();
- size_t stack_limit =
- is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
- DCHECK_LE(frame()->sp, stack_limit);
- size_t frame_size = stack_limit - frame()->sp;
- DCHECK_LE(GetLocalCount(), frame_size);
- return static_cast<int>(frame_size) - GetLocalCount();
- }
-
- WasmValue GetLocalValue(int index) const {
- ThreadImpl::ReferenceStackScope stack_scope(thread_);
- DCHECK_LE(0, index);
- DCHECK_GT(GetLocalCount(), index);
- return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
- }
-
- WasmValue GetStackValue(int index) const {
- ThreadImpl::ReferenceStackScope stack_scope(thread_);
- DCHECK_LE(0, index);
- // Index must be within the number of stack values of this frame.
- DCHECK_GT(GetStackHeight(), index);
- return thread_->GetStackValue(static_cast<int>(frame()->sp) +
- GetLocalCount() + index);
- }
-
- private:
- ThreadImpl* thread_;
- int index_;
-
- ThreadImpl::Frame* frame() const {
- DCHECK_GT(thread_->frames_.size(), index_);
- return &thread_->frames_[index_];
- }
-};
-
-namespace {
-
-// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
-// Thread* is the public interface, without knowledge of the object layout.
-// This cast is potentially risky, but as long as we always cast it back before
-// accessing any data, it should be fine. UBSan is not complaining.
-WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
- return reinterpret_cast<WasmInterpreter::Thread*>(impl);
-}
-ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
- return reinterpret_cast<ThreadImpl*>(thread);
-}
-
-// Same conversion for InterpretedFrame and InterpretedFrameImpl.
-InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
- return reinterpret_cast<InterpretedFrame*>(impl);
-}
-const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
- return reinterpret_cast<const InterpretedFrameImpl*>(frame);
-}
-
-} // namespace
-
-//============================================================================
-// Implementation of the pimpl idiom for WasmInterpreter::Thread.
-// Instead of placing a pointer to the ThreadImpl inside of the Thread object,
-// we just reinterpret_cast them. ThreadImpls are only allocated inside this
-// translation unit anyway.
-//============================================================================
-WasmInterpreter::State WasmInterpreter::Thread::state() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->state();
-}
-void WasmInterpreter::Thread::InitFrame(const WasmFunction* function,
- WasmValue* args) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- impl->InitFrame(function, args);
-}
-WasmInterpreter::State WasmInterpreter::Thread::Run(int num_steps) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->Run(num_steps);
-}
-void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
-void WasmInterpreter::Thread::Reset() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->Reset();
-}
-WasmInterpreter::Thread::ExceptionHandlingResult
-WasmInterpreter::Thread::RaiseException(Isolate* isolate,
- Handle<Object> exception) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->RaiseException(isolate, exception);
-}
-pc_t WasmInterpreter::Thread::GetBreakpointPc() {
- return ToImpl(this)->GetBreakpointPc();
-}
-int WasmInterpreter::Thread::GetFrameCount() {
- return ToImpl(this)->GetFrameCount();
-}
-WasmInterpreter::FramePtr WasmInterpreter::Thread::GetFrame(int index) {
- DCHECK_LE(0, index);
- DCHECK_GT(GetFrameCount(), index);
- return FramePtr(ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
-}
-WasmValue WasmInterpreter::Thread::GetReturnValue(int index) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->GetReturnValue(index);
-}
-TrapReason WasmInterpreter::Thread::GetTrapReason() {
- return ToImpl(this)->GetTrapReason();
-}
-bool WasmInterpreter::Thread::PossibleNondeterminism() {
- return ToImpl(this)->PossibleNondeterminism();
-}
-uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
- return ToImpl(this)->NumInterpretedCalls();
-}
-void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
- ToImpl(this)->AddBreakFlags(flags);
-}
-void WasmInterpreter::Thread::ClearBreakFlags() {
- ToImpl(this)->ClearBreakFlags();
-}
-uint32_t WasmInterpreter::Thread::NumActivations() {
- return ToImpl(this)->NumActivations();
-}
-uint32_t WasmInterpreter::Thread::StartActivation() {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->StartActivation();
-}
-void WasmInterpreter::Thread::FinishActivation(uint32_t id) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- impl->FinishActivation(id);
-}
-uint32_t WasmInterpreter::Thread::ActivationFrameBase(uint32_t id) {
- ThreadImpl* impl = ToImpl(this);
- ThreadImpl::ReferenceStackScope stack_scope(impl);
- return impl->ActivationFrameBase(id);
-}
-
-//============================================================================
-// The implementation details of the interpreter.
-//============================================================================
-class WasmInterpreterInternals {
- public:
- // Create a copy of the module bytes for the interpreter, since the passed
- // pointer might be invalidated after constructing the interpreter.
- const ZoneVector<uint8_t> module_bytes_;
- CodeMap codemap_;
- std::vector<ThreadImpl> threads_;
-
- WasmInterpreterInternals(Zone* zone, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance_object)
- : module_bytes_(wire_bytes.start(), wire_bytes.end(), zone),
- codemap_(module, module_bytes_.data(), zone) {
- threads_.emplace_back(zone, &codemap_, instance_object);
- }
-};
-
-namespace {
-void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
- Address* global_handle_location =
- reinterpret_cast<Address*>(data.GetParameter());
- GlobalHandles::Destroy(global_handle_location);
-}
-
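-// The interpreter holds the instance through a weak global handle so that it
-// does not keep the instance alive; the NopFinalizer above only releases the
-// handle slot once the instance object dies.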
-Handle<WasmInstanceObject> MakeWeak(
- Isolate* isolate, Handle<WasmInstanceObject> instance_object) {
- Handle<WasmInstanceObject> weak_instance =
- isolate->global_handles()->Create<WasmInstanceObject>(*instance_object);
- Address* global_handle_location = weak_instance.location();
- GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
- &NopFinalizer, v8::WeakCallbackType::kParameter);
- return weak_instance;
-}
-} // namespace
-
-//============================================================================
-// Implementation of the public interface of the interpreter.
-//============================================================================
-WasmInterpreter::WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance_object)
- : zone_(isolate->allocator(), ZONE_NAME),
- internals_(new WasmInterpreterInternals(
- &zone_, module, wire_bytes, MakeWeak(isolate, instance_object))) {}
-
-// The destructor is here so we can forward declare {WasmInterpreterInternals}
-// used in the {unique_ptr} in the header.
-WasmInterpreter::~WasmInterpreter() {}
-
-void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
-
-void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
-
-void WasmInterpreter::PrepareStepIn(const WasmFunction* function) {
- // Set a breakpoint at the start of the function.
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- pc_t pc = code->locals.encoded_size;
- SetBreakpoint(function, pc, true);
-}
-
-bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
- bool enabled) {
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- size_t size = static_cast<size_t>(code->end - code->start);
- // Check bounds for {pc}.
- if (pc < code->locals.encoded_size || pc >= size) return false;
- // Make a copy of the code before enabling a breakpoint.
- if (enabled && code->orig_start == code->start) {
- code->start = reinterpret_cast<byte*>(zone_.New(size));
- memcpy(code->start, code->orig_start, size);
- code->end = code->start + size;
- }
- bool prev = code->start[pc] == kInternalBreakpoint;
- if (enabled) {
- code->start[pc] = kInternalBreakpoint;
- } else {
- code->start[pc] = code->orig_start[pc];
- }
- return prev;
-}
-
-bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
- InterpreterCode* code = internals_->codemap_.GetCode(function);
- size_t size = static_cast<size_t>(code->end - code->start);
- // Check bounds for {pc}.
- if (pc < code->locals.encoded_size || pc >= size) return false;
- // Check if a breakpoint is present at that place in the code.
- return code->start[pc] == kInternalBreakpoint;
-}
-
-bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
- UNIMPLEMENTED();
- return false;
-}
-
-int WasmInterpreter::GetThreadCount() {
- return 1; // only one thread for now.
-}
-
-WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
- CHECK_EQ(0, id); // only one thread for now.
- return ToThread(&internals_->threads_[id]);
-}
-
-void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
- internals_->codemap_.AddFunction(function, nullptr, nullptr);
-}
-
-void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
- const byte* start,
- const byte* end) {
- internals_->codemap_.SetFunctionCode(function, start, end);
-}
-
-ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
- Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
- // Create some dummy structures, to avoid special-casing the implementation
- // just for testing.
- FunctionSig sig(0, 0, nullptr);
- WasmFunction function{&sig, // sig
- 0, // func_index
- 0, // sig_index
- {0, 0}, // code
- false, // imported
- false, // exported
- false}; // declared
- InterpreterCode code{
- &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
-
- // Now compute and return the control transfers.
- SideTable side_table(zone, module, &code);
- return side_table.map_;
-}
-
-//============================================================================
-// Implementation of the frame inspection interface.
-//============================================================================
-const WasmFunction* InterpretedFrame::function() const {
- return ToImpl(this)->function();
-}
-int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
-int InterpretedFrame::GetParameterCount() const {
- return ToImpl(this)->GetParameterCount();
-}
-int InterpretedFrame::GetLocalCount() const {
- return ToImpl(this)->GetLocalCount();
-}
-int InterpretedFrame::GetStackHeight() const {
- return ToImpl(this)->GetStackHeight();
-}
-WasmValue InterpretedFrame::GetLocalValue(int index) const {
- return ToImpl(this)->GetLocalValue(index);
-}
-WasmValue InterpretedFrame::GetStackValue(int index) const {
- return ToImpl(this)->GetStackValue(index);
-}
-void InterpretedFrameDeleter::operator()(InterpretedFrame* ptr) {
- delete ToImpl(ptr);
-}
-
-#undef TRACE
-#undef LANE
-#undef FOREACH_INTERNAL_OPCODE
-#undef FOREACH_SIMPLE_BINOP
-#undef FOREACH_OTHER_BINOP
-#undef FOREACH_I32CONV_FLOATOP
-#undef FOREACH_OTHER_UNOP
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
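
For reference, a minimal standalone sketch of the copy-and-patch breakpoint scheme implemented by the removed WasmInterpreter::SetBreakpoint above, assuming a plain byte vector instead of V8's InterpreterCode; the kInternalBreakpoint value and all names here are illustrative, not V8 API:

#include <cstdint>
#include <vector>

constexpr uint8_t kInternalBreakpoint = 0xFF;  // assumed sentinel opcode

struct PatchableCode {
  std::vector<uint8_t> orig;     // original, never-modified bytes
  std::vector<uint8_t> patched;  // lazily created copy that holds breakpoints

  // Sets or clears a breakpoint at {pc}; returns the previous state,
  // mirroring the contract of the removed SetBreakpoint.
  bool SetBreakpoint(size_t pc, bool enabled) {
    if (pc >= orig.size()) return false;             // bounds check
    if (enabled && patched.empty()) patched = orig;  // copy before patching
    if (patched.empty()) return false;               // nothing was ever patched
    bool prev = patched[pc] == kInternalBreakpoint;
    patched[pc] = enabled ? kInternalBreakpoint : orig[pc];
    return prev;
  }
};

Keeping the untouched original bytes around means a cleared breakpoint can simply restore orig[pc], which mirrors what the deleted code does with orig_start.
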
diff --git a/chromium/v8/src/wasm/wasm-interpreter.h b/chromium/v8/src/wasm/wasm-interpreter.h
deleted file mode 100644
index 5a154be6982..00000000000
--- a/chromium/v8/src/wasm/wasm-interpreter.h
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_WASM_INTERPRETER_H_
-#define V8_WASM_WASM_INTERPRETER_H_
-
-#include <memory>
-
-#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-value.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-
-namespace internal {
-class WasmInstanceObject;
-
-namespace wasm {
-
-// Forward declarations.
-struct ModuleWireBytes;
-struct WasmFunction;
-struct WasmModule;
-class WasmInterpreterInternals;
-
-using pc_t = size_t;
-using sp_t = size_t;
-using pcdiff_t = int32_t;
-using spdiff_t = uint32_t;
-
-constexpr pc_t kInvalidPc = 0x80000000;
-
-struct ControlTransferEntry {
- // Distance from the instruction to the label to jump to (forward, but can be
- // negative).
- pcdiff_t pc_diff;
- // Delta by which to decrease the stack height.
- spdiff_t sp_diff;
- // Arity of the block we jump to.
- uint32_t target_arity;
-};
-
-using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
-
-// Representation of frames within the interpreter.
-//
-// Layout of a frame:
-// -----------------
-// stack slot #N ‾\.
-// ... | stack entries: GetStackHeight(); GetStackValue()
-// stack slot #0 _/·
-// local #L ‾\.
-// ... | locals: GetLocalCount(); GetLocalValue()
-// local #P+1 |
-// param #P | ‾\.
-// ... | | parameters: GetParameterCount(); GetLocalValue()
-// param #0 _/· _/·
-// -----------------
-//
-class V8_EXPORT_PRIVATE InterpretedFrame {
- public:
- const WasmFunction* function() const;
- int pc() const;
-
- int GetParameterCount() const;
- int GetLocalCount() const;
- int GetStackHeight() const;
- WasmValue GetLocalValue(int index) const;
- WasmValue GetStackValue(int index) const;
-
- private:
- friend class WasmInterpreter;
-  // Don't instantiate InterpretedFrames; they will be allocated as
- // InterpretedFrameImpl in the interpreter implementation.
- InterpretedFrame() = delete;
- DISALLOW_COPY_AND_ASSIGN(InterpretedFrame);
-};
-
-// Deleter struct to delete the underlying InterpretedFrameImpl without
-// violating language specifications.
-struct V8_EXPORT_PRIVATE InterpretedFrameDeleter {
- void operator()(InterpretedFrame* ptr);
-};
-
-// An interpreter capable of executing WebAssembly.
-class V8_EXPORT_PRIVATE WasmInterpreter {
- public:
- // State machine for a Thread:
- // +----------------------------------------------------------+
- // | +--------Run()/Step()---------+ |
- // V V | |
- // STOPPED ---Run()--> RUNNING ------Pause()-----+-> PAUSED <--+
- // ^ | | | | / |
- // +--- Exception ---+ | | +--- Breakpoint ---+ RaiseException() <--+
- // | | |
- // | +---------- Trap --------------> TRAPPED --------+
- // +----------- Finish -------------> FINISHED
- enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
-
- // Tells a thread to pause after certain instructions.
- enum BreakFlag : uint8_t {
- None = 0,
- AfterReturn = 1 << 0,
- AfterCall = 1 << 1
- };
-
- using FramePtr = std::unique_ptr<InterpretedFrame, InterpretedFrameDeleter>;
-
- // Representation of a thread in the interpreter.
- class V8_EXPORT_PRIVATE Thread {
-    // Don't instantiate Threads; they will be allocated as ThreadImpl in the
- // interpreter implementation.
- Thread() = delete;
-
- public:
- enum ExceptionHandlingResult { HANDLED, UNWOUND };
-
- // Execution control.
- State state();
- void InitFrame(const WasmFunction* function, WasmValue* args);
- // Pass -1 as num_steps to run till completion, pause or breakpoint.
- State Run(int num_steps = -1);
- State Step() { return Run(1); }
- void Pause();
- void Reset();
-
- // Raise an exception in the current activation and unwind the stack
- // accordingly. Return whether the exception was handled inside wasm:
- // - HANDLED: Activation at handler position and in {PAUSED} state.
- // - UNWOUND: Frames unwound, exception pending, and in {STOPPED} state.
- ExceptionHandlingResult RaiseException(Isolate*, Handle<Object> exception);
-
- // Stack inspection and modification.
- pc_t GetBreakpointPc();
- int GetFrameCount();
- // The InterpretedFrame is only valid as long as the Thread is paused.
- FramePtr GetFrame(int index);
- WasmValue GetReturnValue(int index = 0);
- TrapReason GetTrapReason();
-
- // Returns true if the thread executed an instruction which may produce
- // nondeterministic results, e.g. float div, float sqrt, and float mul,
- // where the sign bit of a NaN is nondeterministic.
- bool PossibleNondeterminism();
-
- // Returns the number of calls / function frames executed on this thread.
- uint64_t NumInterpretedCalls();
-
- // Thread-specific breakpoints.
- // TODO(wasm): Implement this once we support multiple threads.
- // bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
- // bool GetBreakpoint(const WasmFunction* function, int pc);
-
- void AddBreakFlags(uint8_t flags);
- void ClearBreakFlags();
-
- // Each thread can have multiple activations, each represented by a portion
- // of the stack frames of this thread. StartActivation returns the id
- // (counting from 0 up) of the started activation.
- // Activations must be properly stacked, i.e. if FinishActivation is called,
-    // the given id must be the latest activation on the stack.
- uint32_t NumActivations();
- uint32_t StartActivation();
- void FinishActivation(uint32_t activation_id);
- // Return the frame base of the given activation, i.e. the number of frames
- // when this activation was started.
- uint32_t ActivationFrameBase(uint32_t activation_id);
- };
-
- WasmInterpreter(Isolate* isolate, const WasmModule* module,
- const ModuleWireBytes& wire_bytes,
- Handle<WasmInstanceObject> instance);
-
- ~WasmInterpreter();
-
- //==========================================================================
- // Execution controls.
- //==========================================================================
- void Run();
- void Pause();
-
-  // Prepare {function} for stepping in from JavaScript.
- void PrepareStepIn(const WasmFunction* function);
-
- // Set a breakpoint at {pc} in {function} to be {enabled}. Returns the
- // previous state of the breakpoint at {pc}.
- bool SetBreakpoint(const WasmFunction* function, pc_t pc, bool enabled);
-
- // Gets the current state of the breakpoint at {function}.
- bool GetBreakpoint(const WasmFunction* function, pc_t pc);
-
- // Enable or disable tracing for {function}. Return the previous state.
- bool SetTracing(const WasmFunction* function, bool enabled);
-
- //==========================================================================
- // Thread iteration and inspection.
- //==========================================================================
- int GetThreadCount();
- Thread* GetThread(int id);
-
- //==========================================================================
- // Testing functionality.
- //==========================================================================
- // Manually adds a function to this interpreter. The func_index of the
- // function must match the current number of functions.
- void AddFunctionForTesting(const WasmFunction* function);
- // Manually adds code to the interpreter for the given function.
- void SetFunctionCodeForTesting(const WasmFunction* function,
- const byte* start, const byte* end);
-
- // Computes the control transfers for the given bytecode. Used internally in
- // the interpreter, but exposed for testing.
- static ControlTransferMap ComputeControlTransfersForTesting(
- Zone* zone, const WasmModule* module, const byte* start, const byte* end);
-
- private:
- Zone zone_;
- std::unique_ptr<WasmInterpreterInternals> internals_;
-
- DISALLOW_COPY_AND_ASSIGN(WasmInterpreter);
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_WASM_INTERPRETER_H_
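
A standalone sketch of the activation bookkeeping described by the removed Thread interface (StartActivation/FinishActivation/ActivationFrameBase), assuming a simple vector of frame bases; names are illustrative only:

#include <cassert>
#include <cstdint>
#include <vector>

class ActivationTracker {
 public:
  // Starts a new activation and returns its id (counting from 0 up).
  uint32_t StartActivation(uint32_t current_frame_count) {
    frame_bases_.push_back(current_frame_count);
    return static_cast<uint32_t>(frame_bases_.size() - 1);
  }
  // Activations must be finished in LIFO order.
  void FinishActivation(uint32_t id) {
    assert(id + 1 == frame_bases_.size());
    frame_bases_.pop_back();
  }
  // Number of frames that existed when this activation was started.
  uint32_t ActivationFrameBase(uint32_t id) const { return frame_bases_[id]; }
  uint32_t NumActivations() const {
    return static_cast<uint32_t>(frame_bases_.size());
  }

 private:
  std::vector<uint32_t> frame_bases_;
};

Unwinding back down to ActivationFrameBase(id) frames restores the state from before the activation started, which is what the stacking requirement guarantees.
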
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index 64719fb59a3..25109bd3969 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -93,37 +93,48 @@ class WasmStreaming::WasmStreamingImpl {
};
WasmStreaming::WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl)
- : impl_(std::move(impl)) {}
+ : impl_(std::move(impl)) {
+ TRACE_EVENT0("v8.wasm", "wasm.InitializeStreaming");
+}
// The destructor is defined here because we have a unique_ptr with forward
// declaration.
WasmStreaming::~WasmStreaming() = default;
void WasmStreaming::OnBytesReceived(const uint8_t* bytes, size_t size) {
+ TRACE_EVENT1("v8.wasm", "wasm.OnBytesReceived", "num_bytes", size);
impl_->OnBytesReceived(bytes, size);
}
-void WasmStreaming::Finish() { impl_->Finish(); }
+void WasmStreaming::Finish() {
+ TRACE_EVENT0("v8.wasm", "wasm.FinishStreaming");
+ impl_->Finish();
+}
void WasmStreaming::Abort(MaybeLocal<Value> exception) {
+ TRACE_EVENT0("v8.wasm", "wasm.AbortStreaming");
impl_->Abort(exception);
}
bool WasmStreaming::SetCompiledModuleBytes(const uint8_t* bytes, size_t size) {
+ TRACE_EVENT0("v8.wasm", "wasm.SetCompiledModuleBytes");
return impl_->SetCompiledModuleBytes(bytes, size);
}
void WasmStreaming::SetClient(std::shared_ptr<Client> client) {
+ TRACE_EVENT0("v8.wasm", "wasm.WasmStreaming.SetClient");
impl_->SetClient(client);
}
void WasmStreaming::SetUrl(const char* url, size_t length) {
+ TRACE_EVENT0("v8.wasm", "wasm.SetUrl");
impl_->SetUrl(internal::VectorOf(url, length));
}
// static
std::shared_ptr<WasmStreaming> WasmStreaming::Unpack(Isolate* isolate,
Local<Value> value) {
+ TRACE_EVENT0("v8.wasm", "wasm.WasmStreaming.Unpack");
i::HandleScope scope(reinterpret_cast<i::Isolate*>(isolate));
auto managed =
i::Handle<i::Managed<WasmStreaming>>::cast(Utils::OpenHandle(*value));
@@ -1066,16 +1077,15 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::String> string;
if (!value->ToString(context).ToLocal(&string)) return;
auto enabled_features = i::wasm::WasmFeatures::FromFlags();
+ // The JS api uses 'anyfunc' instead of 'funcref'.
if (string->StringEquals(v8_str(isolate, "anyfunc"))) {
type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "anyref"))) {
- type = i::wasm::kWasmAnyRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "nullref"))) {
- type = i::wasm::kWasmNullRef;
+ } else if (enabled_features.has_reftypes() &&
+ string->StringEquals(v8_str(isolate, "externref"))) {
+ type = i::wasm::kWasmExternRef;
} else {
- thrower.TypeError("Descriptor property 'element' must be 'anyfunc'");
+ thrower.TypeError(
+ "Descriptor property 'element' must be a WebAssembly reference type");
return;
}
}
@@ -1198,15 +1208,13 @@ bool GetValueType(Isolate* isolate, MaybeLocal<Value> maybe,
*type = i::wasm::kWasmI64;
} else if (string->StringEquals(v8_str(isolate, "f64"))) {
*type = i::wasm::kWasmF64;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "anyref"))) {
- *type = i::wasm::kWasmAnyRef;
- } else if (enabled_features.has_anyref() &&
+ } else if (enabled_features.has_reftypes() &&
+ string->StringEquals(v8_str(isolate, "externref"))) {
+ *type = i::wasm::kWasmExternRef;
+ // The JS api spec uses 'anyfunc' instead of 'funcref'.
+ } else if (enabled_features.has_reftypes() &&
string->StringEquals(v8_str(isolate, "anyfunc"))) {
*type = i::wasm::kWasmFuncRef;
- } else if (enabled_features.has_anyref() &&
- string->StringEquals(v8_str(isolate, "nullref"))) {
- *type = i::wasm::kWasmNullRef;
} else if (enabled_features.has_eh() &&
string->StringEquals(v8_str(isolate, "exnref"))) {
*type = i::wasm::kWasmExnRef;
@@ -1259,8 +1267,7 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!GetValueType(isolate, maybe, context, &type, enabled_features)) return;
if (type == i::wasm::kWasmStmt) {
thrower.TypeError(
- "Descriptor property 'value' must be 'i32', 'i64', 'f32', or "
- "'f64'");
+ "Descriptor property 'value' must be a WebAssembly type");
return;
}
}
@@ -1327,48 +1334,48 @@ void WebAssemblyGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
global_obj->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kExnRef: {
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetAnyRef(i_isolate->factory()->null_value());
- break;
- }
- global_obj->SetAnyRef(Utils::OpenHandle(*value));
- break;
- }
- case i::wasm::ValueType::kNullRef:
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetNullRef(i_isolate->factory()->null_value());
- break;
- }
- if (!global_obj->SetNullRef(Utils::OpenHandle(*value))) {
- thrower.TypeError("The value of nullref globals must be null");
- }
- break;
- case i::wasm::ValueType::kFuncRef: {
- if (args.Length() < 2) {
- // When no initial value is provided, we have to use the WebAssembly
- // default value 'null', and not the JS default value 'undefined'.
- global_obj->SetFuncRef(i_isolate, i_isolate->factory()->null_value());
- break;
- }
-
- if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
- thrower.TypeError(
- "The value of anyfunc globals must be null or an "
- "exported function");
+ case i::wasm::ValueType::kRef:
+ case i::wasm::ValueType::kOptRef: {
+ switch (type.heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapExn: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetExternRef(i_isolate->factory()->null_value());
+ break;
+ }
+ global_obj->SetExternRef(Utils::OpenHandle(*value));
+ break;
+ }
+ case i::wasm::kHeapFunc: {
+ if (args.Length() < 2) {
+ // When no initial value is provided, we have to use the WebAssembly
+ // default value 'null', and not the JS default value 'undefined'.
+ global_obj->SetFuncRef(i_isolate,
+ i_isolate->factory()->null_value());
+ break;
+ }
+
+ if (!global_obj->SetFuncRef(i_isolate, Utils::OpenHandle(*value))) {
+ thrower.TypeError(
+ "The value of funcref globals must be null or an "
+ "exported function");
+ }
+ break;
+ }
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
}
break;
}
- case i::wasm::ValueType::kRef:
- case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
+ case i::wasm::ValueType::kRtt:
+ // TODO(7748): Implement.
UNIMPLEMENTED();
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kStmt:
case i::wasm::ValueType::kS128:
case i::wasm::ValueType::kBottom:
@@ -1589,7 +1596,7 @@ void WebAssemblyTableGetLength(
v8::Number::New(isolate, receiver->current_length()));
}
-// WebAssembly.Table.grow(num) -> num
+// WebAssembly.Table.grow(num, init_value = null) -> num
void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -1603,8 +1610,20 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- int old_size = i::WasmTableObject::Grow(i_isolate, receiver, grow_by,
- i_isolate->factory()->null_value());
+ i::Handle<i::Object> init_value = i_isolate->factory()->null_value();
+ auto enabled_features = i::wasm::WasmFeatures::FromIsolate(i_isolate);
+ if (enabled_features.has_typed_funcref()) {
+ if (args.Length() >= 2 && !args[1]->IsUndefined()) {
+ init_value = Utils::OpenHandle(*args[1]);
+ }
+ if (!i::WasmTableObject::IsValidElement(i_isolate, receiver, init_value)) {
+ thrower.TypeError("Argument 1 must be a valid type for the table");
+ return;
+ }
+ }
+
+ int old_size =
+ i::WasmTableObject::Grow(i_isolate, receiver, grow_by, init_value);
if (old_size < 0) {
thrower.RangeError("failed to grow table by %u", grow_by);
@@ -1809,22 +1828,31 @@ void WebAssemblyGlobalGetValueCommon(
case i::wasm::ValueType::kF64:
return_value.Set(receiver->GetF64());
break;
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kFuncRef:
- case i::wasm::ValueType::kNullRef:
- case i::wasm::ValueType::kExnRef:
- DCHECK_IMPLIES(receiver->type() == i::wasm::kWasmNullRef,
- receiver->GetRef()->IsNull());
- return_value.Set(Utils::ToLocal(receiver->GetRef()));
+ case i::wasm::ValueType::kS128:
+ thrower.TypeError("Can't get the value of s128 WebAssembly.Global");
break;
case i::wasm::ValueType::kRef:
case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
- UNIMPLEMENTED();
+ switch (receiver->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapFunc:
+ case i::wasm::kHeapExn:
+ return_value.Set(Utils::ToLocal(receiver->GetRef()));
+ break;
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ case i::wasm::ValueType::kRtt:
+ UNIMPLEMENTED(); // TODO(7748): Implement.
+ break;
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
@@ -1889,32 +1917,40 @@ void WebAssemblyGlobalSetValue(
receiver->SetF64(f64_value);
break;
}
- case i::wasm::ValueType::kAnyRef:
- case i::wasm::ValueType::kExnRef: {
- receiver->SetAnyRef(Utils::OpenHandle(*args[0]));
- break;
- }
- case i::wasm::ValueType::kNullRef:
- if (!receiver->SetNullRef(Utils::OpenHandle(*args[0]))) {
- thrower.TypeError("The value of nullref must be null");
- }
- break;
- case i::wasm::ValueType::kFuncRef: {
- if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
- thrower.TypeError(
- "value of an anyfunc reference must be either null or an "
- "exported function");
- }
+ case i::wasm::ValueType::kS128:
+ thrower.TypeError("Can't set the value of s128 WebAssembly.Global");
break;
- }
case i::wasm::ValueType::kRef:
case i::wasm::ValueType::kOptRef:
- case i::wasm::ValueType::kEqRef:
- // TODO(7748): Implement these.
+ switch (receiver->type().heap_type()) {
+ case i::wasm::kHeapExtern:
+ case i::wasm::kHeapExn:
+ receiver->SetExternRef(Utils::OpenHandle(*args[0]));
+ break;
+ case i::wasm::kHeapFunc: {
+ if (!receiver->SetFuncRef(i_isolate, Utils::OpenHandle(*args[0]))) {
+ thrower.TypeError(
+                "value of a funcref reference must be either null or an "
+ "exported function");
+ }
+ break;
+ }
+
+ case i::wasm::kHeapEq:
+ default:
+ // TODO(7748): Implement these.
+ UNIMPLEMENTED();
+ break;
+ }
+ break;
+ case i::wasm::ValueType::kRtt:
+ // TODO(7748): Implement.
UNIMPLEMENTED();
+ break;
+ case i::wasm::ValueType::kI8:
+ case i::wasm::ValueType::kI16:
case i::wasm::ValueType::kBottom:
case i::wasm::ValueType::kStmt:
- case i::wasm::ValueType::kS128:
UNREACHABLE();
}
}
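
A standalone sketch of the two-level dispatch wasm-js.cc now uses for reference-typed globals: first on the value kind (kRef/kOptRef), then on the heap type being referenced. The enums and behavior strings below are illustrative only, not V8 API:

#include <string>

enum class Kind { kI32, kRef, kOptRef };
enum class Heap { kFunc, kExtern, kExn, kEq };

// Returns which JS values a global of this type accepts, mirroring the
// switch structure in WebAssemblyGlobal and WebAssemblyGlobalSetValue.
std::string AcceptedValues(Kind kind, Heap heap) {
  switch (kind) {
    case Kind::kI32:
      return "numbers";
    case Kind::kRef:
    case Kind::kOptRef:
      switch (heap) {
        case Heap::kExtern:
        case Heap::kExn:
          return "any JS value";
        case Heap::kFunc:
          return "null or an exported function";
        default:
          return "unimplemented";
      }
  }
  return "unknown";
}
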
diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc
index bcfc49dcbaa..8ea63ef4b8e 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.cc
+++ b/chromium/v8/src/wasm/wasm-module-builder.cc
@@ -414,8 +414,11 @@ void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
void WriteValueType(ZoneBuffer* buffer, const ValueType& type) {
buffer->write_u8(type.value_type_code());
- if (type.has_immediate()) {
- buffer->write_u32v(type.ref_index());
+ if (type.has_depth()) {
+ buffer->write_u32v(type.depth());
+ }
+ if (type.encoding_needs_heap_type()) {
+ buffer->write_u32v(type.heap_type_code());
}
}
@@ -450,8 +453,9 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
StructType* struct_type = type.struct_type;
buffer->write_u8(kWasmStructTypeCode);
buffer->write_size(struct_type->field_count());
- for (auto field : struct_type->fields()) {
- WriteValueType(buffer, field);
+ for (uint32_t i = 0; i < struct_type->field_count(); i++) {
+ WriteValueType(buffer, struct_type->field(i));
+ buffer->write_u8(struct_type->mutability(i) ? 1 : 0);
}
break;
}
@@ -459,6 +463,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
ArrayType* array_type = type.array_type;
buffer->write_u8(kWasmArrayTypeCode);
WriteValueType(buffer, array_type->element_type());
+ buffer->write_u8(array_type->mutability() ? 1 : 0);
break;
}
}
@@ -564,6 +569,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
break;
case WasmInitExpr::kRefNullConst:
buffer->write_u8(kExprRefNull);
+ WriteValueType(buffer, global.type);
break;
case WasmInitExpr::kRefFuncConst:
UNIMPLEMENTED();
@@ -590,12 +596,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_f64(0.);
break;
case ValueType::kOptRef:
- case ValueType::kFuncRef:
- case ValueType::kExnRef:
- case ValueType::kEqRef:
buffer->write_u8(kExprRefNull);
break;
- default:
+ case ValueType::kI8:
+ case ValueType::kI16:
+ case ValueType::kStmt:
+ case ValueType::kS128:
+ case ValueType::kBottom:
+ case ValueType::kRef:
+ case ValueType::kRtt:
UNREACHABLE();
}
}
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index 5111a783728..405586107a2 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -50,8 +50,11 @@ LazilyGeneratedNames::LookupNameFromImportsAndExports(
Vector<const WasmImport> import_table,
Vector<const WasmExport> export_table) const {
base::MutexGuard lock(&mutex_);
- DCHECK(kind == kExternalGlobal || kind == kExternalMemory);
- auto& names = kind == kExternalGlobal ? global_names_ : memory_names_;
+ DCHECK(kind == kExternalGlobal || kind == kExternalMemory ||
+ kind == kExternalTable);
+ auto& names = kind == kExternalGlobal
+ ? global_names_
+ : kind == kExternalMemory ? memory_names_ : table_names_;
if (!names) {
names.reset(
new std::unordered_map<uint32_t,
@@ -215,7 +218,16 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
}
WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
- : signature_zone(std::move(signature_zone)) {}
+ : signature_zone(std::move(signature_zone)),
+ subtyping_cache(this->signature_zone.get() == nullptr
+ ? nullptr
+ : new ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>(
+ this->signature_zone.get())),
+ type_equivalence_cache(
+ this->signature_zone.get() == nullptr
+ ? nullptr
+ : new ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>(
+ this->signature_zone.get())) {}
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
@@ -239,50 +251,10 @@ namespace {
// Converts the given {type} into a string representation that can be used in
// reflective functions. Should be kept in sync with the {GetValueType} helper.
Handle<String> ToValueTypeString(Isolate* isolate, ValueType type) {
- // TODO(ahaas/jkummerow): This could be as simple as:
- // return isolate->factory()->InternalizeUtf8String(type.type_name());
- // if we clean up all occurrences of "anyfunc" in favor of "funcref".
- Factory* factory = isolate->factory();
- Handle<String> string;
- switch (type.kind()) {
- case i::wasm::ValueType::kI32: {
- string = factory->InternalizeUtf8String("i32");
- break;
- }
- case i::wasm::ValueType::kI64: {
- string = factory->InternalizeUtf8String("i64");
- break;
- }
- case i::wasm::ValueType::kF32: {
- string = factory->InternalizeUtf8String("f32");
- break;
- }
- case i::wasm::ValueType::kF64: {
- string = factory->InternalizeUtf8String("f64");
- break;
- }
- case i::wasm::ValueType::kAnyRef: {
- string = factory->InternalizeUtf8String("anyref");
- break;
- }
- case i::wasm::ValueType::kFuncRef: {
- string = factory->InternalizeUtf8String("anyfunc");
- break;
- }
- case i::wasm::ValueType::kNullRef: {
- string = factory->InternalizeUtf8String("nullref");
- break;
- }
- case i::wasm::ValueType::kExnRef: {
- string = factory->InternalizeUtf8String("exnref");
- break;
- }
- default:
- UNREACHABLE();
- }
- return string;
+ return isolate->factory()->InternalizeUtf8String(
+ type == kWasmFuncRef ? CStrVector("anyfunc")
+ : VectorOf(type.type_name()));
}
-
} // namespace
Handle<JSObject> GetTypeForFunction(Isolate* isolate, const FunctionSig* sig) {
@@ -357,13 +329,14 @@ Handle<JSObject> GetTypeForTable(Isolate* isolate, ValueType type,
Factory* factory = isolate->factory();
Handle<String> element;
- if (type == kWasmFuncRef) {
- // TODO(wasm): We should define the "anyfunc" string in one central place
- // and then use that constant everywhere.
+ if (type.is_reference_to(kHeapFunc)) {
+ // TODO(wasm): We should define the "anyfunc" string in one central
+ // place and then use that constant everywhere.
element = factory->InternalizeUtf8String("anyfunc");
} else {
- DCHECK(WasmFeatures::FromFlags().has_anyref() && type == kWasmAnyRef);
- element = factory->InternalizeUtf8String("anyref");
+ DCHECK(WasmFeatures::FromFlags().has_reftypes() &&
+ type.is_reference_to(kHeapExtern));
+ element = factory->InternalizeUtf8String("externref");
}
Handle<JSFunction> object_function = isolate->object_function();
@@ -458,9 +431,8 @@ Handle<JSArray> GetImports(Isolate* isolate,
case kExternalException:
import_kind = exception_string;
break;
- default:
- UNREACHABLE();
}
+ DCHECK(!import_kind->is_null());
Handle<String> import_module =
WasmModuleObject::ExtractUtf8StringFromModuleBytes(
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index a189964ad73..f0f8db890b4 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -15,6 +15,7 @@
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
@@ -117,7 +118,7 @@ struct WasmElemSegment {
// Construct an active segment.
WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
- : type(kWasmFuncRef),
+ : type(ValueType::Ref(kHeapFunc, kNullable)),
table_index(table_index),
offset(offset),
status(kStatusActive) {}
@@ -125,7 +126,7 @@ struct WasmElemSegment {
// Construct a passive or declarative segment, which has no table index or
// offset.
explicit WasmElemSegment(bool declarative)
- : type(kWasmFuncRef),
+ : type(ValueType::Ref(kHeapFunc, kNullable)),
table_index(0),
status(declarative ? kStatusDeclarative : kStatusPassive) {}
@@ -206,7 +207,7 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
void AddForTesting(int function_index, WireBytesRef name);
private:
- // {function_names_}, {global_names_} and {memory_names_} are
+ // {function_names_}, {global_names_}, {memory_names_} and {table_names_} are
// populated lazily after decoding, and therefore need a mutex to protect
// concurrent modifications from multiple {WasmModuleObject}.
mutable base::Mutex mutex_;
@@ -218,6 +219,9 @@ class V8_EXPORT_PRIVATE LazilyGeneratedNames {
mutable std::unique_ptr<
std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
memory_names_;
+ mutable std::unique_ptr<
+ std::unordered_map<uint32_t, std::pair<WireBytesRef, WireBytesRef>>>
+ table_names_;
};
class V8_EXPORT_PRIVATE AsmJsOffsetInformation {
@@ -327,6 +331,28 @@ struct V8_EXPORT_PRIVATE WasmModule {
bool has_array(uint32_t index) const {
return index < types.size() && type_kinds[index] == kWasmArrayTypeCode;
}
+ bool is_cached_subtype(uint32_t subtype, uint32_t supertype) const {
+ return subtyping_cache->count(std::make_pair(subtype, supertype)) == 1;
+ }
+ void cache_subtype(uint32_t subtype, uint32_t supertype) const {
+ subtyping_cache->emplace(subtype, supertype);
+ }
+ void uncache_subtype(uint32_t subtype, uint32_t supertype) const {
+ subtyping_cache->erase(std::make_pair(subtype, supertype));
+ }
+ bool is_cached_equivalent_type(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ return type_equivalence_cache->count(std::make_pair(type1, type2)) == 1;
+ }
+ void cache_type_equivalence(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ type_equivalence_cache->emplace(type1, type2);
+ }
+ void uncache_type_equivalence(uint32_t type1, uint32_t type2) const {
+ if (type1 > type2) std::swap(type1, type2);
+ type_equivalence_cache->erase(std::make_pair(type1, type2));
+ }
+
std::vector<WasmFunction> functions;
std::vector<WasmDataSegment> data_segments;
std::vector<WasmTable> tables;
@@ -347,6 +373,15 @@ struct V8_EXPORT_PRIVATE WasmModule {
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
+ private:
+ // Cache for discovered subtyping pairs.
+ std::unique_ptr<ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>>
+ subtyping_cache;
+ // Cache for discovered equivalent type pairs.
+ // Indexes are stored in increasing order.
+ std::unique_ptr<ZoneUnorderedSet<std::pair<uint32_t, uint32_t>>>
+ type_equivalence_cache;
+
DISALLOW_COPY_AND_ASSIGN(WasmModule);
};
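
A standalone sketch of the pair caches added to WasmModule above; it mirrors how cache_type_equivalence canonicalizes a pair by storing the smaller index first, though the container here (std::set) is only illustrative rather than the ZoneUnorderedSet used in V8:

#include <algorithm>
#include <cstdint>
#include <set>
#include <utility>

class TypeRelationCache {
 public:
  void CacheSubtype(uint32_t sub, uint32_t super) {
    subtyping_.emplace(sub, super);
  }
  bool IsCachedSubtype(uint32_t sub, uint32_t super) const {
    return subtyping_.count({sub, super}) == 1;
  }
  // Equivalence is symmetric, so pairs are stored in increasing index order.
  void CacheEquivalence(uint32_t a, uint32_t b) {
    if (a > b) std::swap(a, b);
    equivalence_.emplace(a, b);
  }
  bool IsCachedEquivalence(uint32_t a, uint32_t b) const {
    if (a > b) std::swap(a, b);
    return equivalence_.count({a, b}) == 1;
  }

 private:
  std::set<std::pair<uint32_t, uint32_t>> subtyping_;
  std::set<std::pair<uint32_t, uint32_t>> equivalence_;
};

Because subtyping is not symmetric, only the equivalence cache canonicalizes its pair order.
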
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index 93234493445..d832be25b83 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -30,7 +30,6 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
-OBJECT_CONSTRUCTORS_IMPL(WasmDebugInfo, Struct)
OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
@@ -40,9 +39,6 @@ OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
-NEVER_READ_ONLY_SPACE_IMPL(WasmDebugInfo)
-
-CAST_ACCESSOR(WasmDebugInfo)
CAST_ACCESSOR(WasmExceptionObject)
CAST_ACCESSOR(WasmExportedFunctionData)
CAST_ACCESSOR(WasmGlobalObject)
@@ -126,20 +122,22 @@ ACCESSORS(WasmGlobalObject, untagged_buffer, JSArrayBuffer,
kUntaggedBufferOffset)
ACCESSORS(WasmGlobalObject, tagged_buffer, FixedArray, kTaggedBufferOffset)
SMI_ACCESSORS(WasmGlobalObject, offset, kOffsetOffset)
-SMI_ACCESSORS(WasmGlobalObject, flags, kFlagsOffset)
+// TODO(7748): This will not suffice to hold the 32-bit encoding of a ValueType.
+// We need to devise an encoding that does, and also encodes is_mutable.
+SMI_ACCESSORS(WasmGlobalObject, raw_type, kRawTypeOffset)
+SMI_ACCESSORS(WasmGlobalObject, is_mutable, kIsMutableOffset)
+
wasm::ValueType WasmGlobalObject::type() const {
- return wasm::ValueType(TypeBits::decode(flags()));
+ return wasm::ValueType::FromRawBitField(raw_type());
}
void WasmGlobalObject::set_type(wasm::ValueType value) {
- set_flags(TypeBits::update(flags(), value.kind()));
+ set_raw_type(static_cast<int>(value.raw_bit_field()));
}
-BIT_FIELD_ACCESSORS(WasmGlobalObject, flags, is_mutable,
- WasmGlobalObject::IsMutableBit)
int WasmGlobalObject::type_size() const { return type().element_size_bytes(); }
Address WasmGlobalObject::address() const {
- DCHECK_NE(type(), wasm::kWasmAnyRef);
+ DCHECK_NE(type(), wasm::kWasmExternRef);
DCHECK_LE(offset() + type_size(), untagged_buffer().byte_length());
return Address(untagged_buffer().backing_store()) + offset();
}
@@ -161,8 +159,8 @@ double WasmGlobalObject::GetF64() {
}
Handle<Object> WasmGlobalObject::GetRef() {
- // We use this getter for anyref, funcref, and exnref.
- DCHECK(type().IsReferenceType());
+ // We use this getter for externref, funcref, and exnref.
+ DCHECK(type().is_reference_type());
return handle(tagged_buffer().get(offset()), GetIsolate());
}
@@ -182,21 +180,13 @@ void WasmGlobalObject::SetF64(double value) {
base::WriteLittleEndianValue<double>(address(), value);
}
-void WasmGlobalObject::SetAnyRef(Handle<Object> value) {
- // We use this getter anyref and exnref.
- DCHECK(type() == wasm::kWasmAnyRef || type() == wasm::kWasmExnRef);
+void WasmGlobalObject::SetExternRef(Handle<Object> value) {
+  // We use this setter for externref and exnref.
+ DCHECK(type().is_reference_to(wasm::kHeapExtern) ||
+ type().is_reference_to(wasm::kHeapExn));
tagged_buffer().set(offset(), *value);
}
-bool WasmGlobalObject::SetNullRef(Handle<Object> value) {
- DCHECK_EQ(type(), wasm::kWasmNullRef);
- if (!value->IsNull()) {
- return false;
- }
- tagged_buffer().set(offset(), *value);
- return true;
-}
-
bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
DCHECK_EQ(type(), wasm::kWasmFuncRef);
if (!value->IsNull(isolate) &&
@@ -253,8 +243,6 @@ OPTIONAL_ACCESSORS(WasmInstanceObject, tagged_globals_buffer, FixedArray,
kTaggedGlobalsBufferOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, imported_mutable_globals_buffers,
FixedArray, kImportedMutableGlobalsBuffersOffset)
-OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, WasmDebugInfo,
- kDebugInfoOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, tables, FixedArray, kTablesOffset)
OPTIONAL_ACCESSORS(WasmInstanceObject, indirect_function_tables, FixedArray,
kIndirectFunctionTablesOffset)
@@ -391,23 +379,15 @@ OPTIONAL_ACCESSORS(WasmIndirectFunctionTable, managed_native_allocations,
Foreign, kManagedNativeAllocationsOffset)
ACCESSORS(WasmIndirectFunctionTable, refs, FixedArray, kRefsOffset)
-// WasmDebugInfo
-ACCESSORS(WasmDebugInfo, wasm_instance, WasmInstanceObject, kInstanceOffset)
-ACCESSORS(WasmDebugInfo, interpreter_handle, Object, kInterpreterHandleOffset)
-ACCESSORS(WasmDebugInfo, interpreter_reference_stack, Cell,
- kInterpreterReferenceStackOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entries, FixedArray,
- kCWasmEntriesOffset)
-OPTIONAL_ACCESSORS(WasmDebugInfo, c_wasm_entry_map, Managed<wasm::SignatureMap>,
- kCWasmEntryMapOffset)
-
#undef OPTIONAL_ACCESSORS
#undef READ_PRIMITIVE_FIELD
#undef WRITE_PRIMITIVE_FIELD
#undef PRIMITIVE_ACCESSORS
wasm::ValueType WasmTableObject::type() {
- return wasm::ValueType(static_cast<wasm::ValueType::Kind>(raw_type()));
+ // TODO(7748): Support non-nullable tables?
+ return wasm::ValueType::Ref(static_cast<wasm::HeapType>(raw_type()),
+ wasm::kNullable);
}
bool WasmMemoryObject::has_maximum_pages() { return maximum_pages() >= 0; }
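
A standalone sketch of the WasmGlobalObject storage change visible above: the old flags field packed an 8-bit type kind together with the mutability bit, while the new layout keeps the full ValueType bit field and the mutability flag in separate fields (the TODO notes that even a Smi may not hold all 32 bits of the encoding). The structs below are purely illustrative:

#include <cstdint>

// Old scheme: kind and mutability packed into one small bit field.
struct PackedGlobalFlags {
  uint32_t bits = 0;
  void Set(uint8_t kind, bool is_mutable) {
    bits = kind | (is_mutable ? 1u << 8 : 0u);
  }
  uint8_t kind() const { return bits & 0xFF; }
  bool is_mutable() const { return (bits >> 8) & 1; }
};

// New scheme: the full 32-bit type encoding and the mutability flag live in
// separate fields, matching raw_type and is_mutable above.
struct SplitGlobalFields {
  uint32_t raw_type = 0;
  uint32_t is_mutable = 0;
};
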
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index 28834678893..ae9d64b956d 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -283,7 +283,9 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate,
table_obj->set_entries(*backing_store);
table_obj->set_current_length(initial);
table_obj->set_maximum_length(*max);
- table_obj->set_raw_type(static_cast<int>(type.kind()));
+ // TODO(7748): Make this work with other table types.
+ CHECK(type.is_nullable());
+ table_obj->set_raw_type(static_cast<int>(type.heap_type()));
table_obj->set_dispatch_tables(ReadOnlyRoots(isolate).empty_fixed_array());
if (entries != nullptr) {
@@ -384,14 +386,10 @@ bool WasmTableObject::IsValidElement(Isolate* isolate,
Handle<WasmTableObject> table,
Handle<Object> entry) {
// Anyref and exnref tables take everything.
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
return true;
}
- // Nullref only takes {null}.
- if (table->type() == wasm::kWasmNullRef) {
- return entry->IsNull(isolate);
- }
// FuncRef tables can store {null}, {WasmExportedFunction}, {WasmJSFunction},
// or {WasmCapiFunction} objects.
if (entry->IsNull(isolate)) return true;
@@ -409,8 +407,8 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
Handle<FixedArray> entries(table->entries(), isolate);
// The FixedArray is addressed with int's.
int entry_index = static_cast<int>(index);
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
entries->set(entry_index, *entry);
return;
}
@@ -454,9 +452,9 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
Handle<Object> entry(entries->get(entry_index), isolate);
- // First we handle the easy anyref and exnref table case.
- if (table->type() == wasm::kWasmAnyRef ||
- table->type() == wasm::kWasmExnRef) {
+ // First we handle the easy externref and exnref table case.
+ if (table->type().heap_type() == wasm::kHeapExtern ||
+ table->type().heap_type() == wasm::kHeapExn) {
return entry;
}
@@ -634,7 +632,7 @@ void WasmTableObject::GetFunctionTableEntry(
Isolate* isolate, Handle<WasmTableObject> table, int entry_index,
bool* is_valid, bool* is_null, MaybeHandle<WasmInstanceObject>* instance,
int* function_index, MaybeHandle<WasmJSFunction>* maybe_js_function) {
- DCHECK_EQ(table->type(), wasm::kWasmFuncRef);
+ DCHECK_EQ(table->type().heap_type(), wasm::kHeapFunc);
DCHECK_LT(entry_index, table->current_length());
// We initialize {is_valid} with {true}. We may change it later.
*is_valid = true;
@@ -856,7 +854,7 @@ void WasmMemoryObject::update_instances(Isolate* isolate,
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
+ TRACE_EVENT0("v8.wasm", "wasm.GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
// Any buffer used as an asmjs memory cannot be detached, and
// therefore this memory cannot be grown.
@@ -951,13 +949,13 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
// Disallow GC until all fields have acceptable types.
DisallowHeapAllocation no_gc;
- global_obj->set_flags(0);
+ global_obj->set_raw_type(0);
global_obj->set_type(type);
global_obj->set_offset(offset);
global_obj->set_is_mutable(is_mutable);
}
- if (type.IsReferenceType()) {
+ if (type.is_reference_type()) {
DCHECK(maybe_untagged_buffer.is_null());
Handle<FixedArray> tagged_buffer;
if (!maybe_tagged_buffer.ToHandle(&tagged_buffer)) {
@@ -1175,16 +1173,6 @@ const WasmModule* WasmInstanceObject::module() {
return module_object().module();
}
-Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
- Handle<WasmInstanceObject> instance) {
- if (instance->has_debug_info()) {
- return handle(instance->debug_info(), instance->GetIsolate());
- }
- Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
- DCHECK(instance->has_debug_info());
- return new_info;
-}
-
Handle<WasmInstanceObject> WasmInstanceObject::New(
Isolate* isolate, Handle<WasmModuleObject> module_object) {
Handle<JSFunction> instance_cons(
@@ -1483,7 +1471,7 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
// static
uint8_t* WasmInstanceObject::GetGlobalStorage(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
- DCHECK(!global.type.IsReferenceType());
+ DCHECK(!global.type.is_reference_type());
if (global.mutability && global.imported) {
return reinterpret_cast<byte*>(
instance->imported_mutable_globals()[global.index]);
@@ -1496,7 +1484,7 @@ uint8_t* WasmInstanceObject::GetGlobalStorage(
std::pair<Handle<FixedArray>, uint32_t>
WasmInstanceObject::GetGlobalBufferAndIndex(Handle<WasmInstanceObject> instance,
const wasm::WasmGlobal& global) {
- DCHECK(global.type.IsReferenceType());
+ DCHECK(global.type.is_reference_type());
Isolate* isolate = instance->GetIsolate();
if (global.mutability && global.imported) {
Handle<FixedArray> buffer(
@@ -1522,10 +1510,19 @@ MaybeHandle<String> WasmInstanceObject::GetGlobalNameOrNull(
// static
MaybeHandle<String> WasmInstanceObject::GetMemoryNameOrNull(
Isolate* isolate, Handle<WasmInstanceObject> instance,
- uint32_t global_index) {
+ uint32_t memory_index) {
return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
isolate, instance, wasm::ImportExportKindCode::kExternalMemory,
- global_index);
+ memory_index);
+}
+
+// static
+MaybeHandle<String> WasmInstanceObject::GetTableNameOrNull(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t table_index) {
+ return WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalTable,
+ table_index);
}
// static
@@ -1533,7 +1530,8 @@ MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
Isolate* isolate, Handle<WasmInstanceObject> instance,
wasm::ImportExportKindCode kind, uint32_t index) {
DCHECK(kind == wasm::ImportExportKindCode::kExternalGlobal ||
- kind == wasm::ImportExportKindCode::kExternalMemory);
+ kind == wasm::ImportExportKindCode::kExternalMemory ||
+ kind == wasm::ImportExportKindCode::kExternalTable);
wasm::ModuleWireBytes wire_bytes(
instance->module_object().native_module()->wire_bytes());
@@ -1562,7 +1560,7 @@ MaybeHandle<String> WasmInstanceObject::GetNameFromImportsAndExportsOrNull(
wasm::WasmValue WasmInstanceObject::GetGlobalValue(
Handle<WasmInstanceObject> instance, const wasm::WasmGlobal& global) {
Isolate* isolate = instance->GetIsolate();
- if (global.type.IsReferenceType()) {
+ if (global.type.is_reference_type()) {
Handle<FixedArray> global_buffer; // The buffer of the global.
uint32_t global_index = 0; // The index into the buffer.
std::tie(global_buffer, global_index) =
@@ -1727,17 +1725,15 @@ uint32_t WasmExceptionPackage::GetEncodedSize(
DCHECK_EQ(8, ComputeEncodedElementSize(sig->GetParam(i)));
encoded_size += 8;
break;
- case wasm::ValueType::kAnyRef:
- case wasm::ValueType::kFuncRef:
- case wasm::ValueType::kNullRef:
- case wasm::ValueType::kExnRef:
case wasm::ValueType::kRef:
case wasm::ValueType::kOptRef:
- case wasm::ValueType::kEqRef:
encoded_size += 1;
break;
+ case wasm::ValueType::kRtt:
case wasm::ValueType::kStmt:
case wasm::ValueType::kBottom:
+ case wasm::ValueType::kI8:
+ case wasm::ValueType::kI16:
UNREACHABLE();
}
}
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 217bd50d154..f8ead0fe3e7 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -42,7 +42,6 @@ class BreakPoint;
class JSArrayBuffer;
class SeqOneByteString;
class WasmCapiFunction;
-class WasmDebugInfo;
class WasmExceptionTag;
class WasmExportedFunction;
class WasmExternalFunction;
@@ -67,7 +66,7 @@ class Managed;
// - object = target instance, if a Wasm function, tuple if imported
// - sig_id = signature id of function
// - target = entrypoint to Wasm code or import wrapper code
-class IndirectFunctionTableEntry {
+class V8_EXPORT_PRIVATE IndirectFunctionTableEntry {
public:
inline IndirectFunctionTableEntry(Handle<WasmInstanceObject>, int table_index,
int entry_index);
@@ -76,9 +75,8 @@ class IndirectFunctionTableEntry {
int entry_index);
void clear();
- V8_EXPORT_PRIVATE void Set(int sig_id,
- Handle<WasmInstanceObject> target_instance,
- int target_func_index);
+ void Set(int sig_id, Handle<WasmInstanceObject> target_instance,
+ int target_func_index);
void Set(int sig_id, Address call_target, Object ref);
Object object_ref() const;
@@ -324,16 +322,16 @@ class WasmGlobalObject : public JSObject {
DECL_ACCESSORS(untagged_buffer, JSArrayBuffer)
DECL_ACCESSORS(tagged_buffer, FixedArray)
DECL_INT32_ACCESSORS(offset)
- DECL_INT_ACCESSORS(flags)
+ DECL_INT_ACCESSORS(raw_type)
DECL_PRIMITIVE_ACCESSORS(type, wasm::ValueType)
- DECL_BOOLEAN_ACCESSORS(is_mutable)
+ // TODO(7748): Once we improve the encoding of mutability/type, turn this back
+ // into a boolean accessor.
+ DECL_INT_ACCESSORS(is_mutable)
// Dispatched behavior.
DECL_PRINTER(WasmGlobalObject)
DECL_VERIFIER(WasmGlobalObject)
- DEFINE_TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FLAGS()
-
DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
TORQUE_GENERATED_WASM_GLOBAL_OBJECT_FIELDS)
@@ -354,8 +352,7 @@ class WasmGlobalObject : public JSObject {
inline void SetI64(int64_t value);
inline void SetF32(float value);
inline void SetF64(double value);
- inline void SetAnyRef(Handle<Object> value);
- inline bool SetNullRef(Handle<Object> value);
+ inline void SetExternRef(Handle<Object> value);
inline bool SetFuncRef(Isolate* isolate, Handle<Object> value);
private:
@@ -368,7 +365,7 @@ class WasmGlobalObject : public JSObject {
};
// Representation of a WebAssembly.Instance JavaScript-level object.
-class WasmInstanceObject : public JSObject {
+class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
public:
DECL_CAST(WasmInstanceObject)
@@ -379,7 +376,6 @@ class WasmInstanceObject : public JSObject {
DECL_OPTIONAL_ACCESSORS(untagged_globals_buffer, JSArrayBuffer)
DECL_OPTIONAL_ACCESSORS(tagged_globals_buffer, FixedArray)
DECL_OPTIONAL_ACCESSORS(imported_mutable_globals_buffers, FixedArray)
- DECL_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo)
DECL_OPTIONAL_ACCESSORS(tables, FixedArray)
DECL_OPTIONAL_ACCESSORS(indirect_function_tables, FixedArray)
DECL_ACCESSORS(imported_function_refs, FixedArray)
@@ -441,7 +437,6 @@ class WasmInstanceObject : public JSObject {
V(kUntaggedGlobalsBufferOffset, kTaggedSize) \
V(kTaggedGlobalsBufferOffset, kTaggedSize) \
V(kImportedMutableGlobalsBuffersOffset, kTaggedSize) \
- V(kDebugInfoOffset, kTaggedSize) \
V(kTablesOffset, kTaggedSize) \
V(kIndirectFunctionTablesOffset, kTaggedSize) \
V(kManagedNativeAllocationsOffset, kTaggedSize) \
@@ -480,7 +475,6 @@ class WasmInstanceObject : public JSObject {
kUntaggedGlobalsBufferOffset,
kTaggedGlobalsBufferOffset,
kImportedMutableGlobalsBuffersOffset,
- kDebugInfoOffset,
kTablesOffset,
kIndirectFunctionTablesOffset,
kManagedNativeAllocationsOffset,
@@ -488,21 +482,15 @@ class WasmInstanceObject : public JSObject {
kWasmExternalFunctionsOffset,
kManagedObjectMapsOffset};
- V8_EXPORT_PRIVATE const wasm::WasmModule* module();
+ const wasm::WasmModule* module();
- V8_EXPORT_PRIVATE static bool EnsureIndirectFunctionTableWithMinimumSize(
+ static bool EnsureIndirectFunctionTableWithMinimumSize(
Handle<WasmInstanceObject> instance, int table_index,
uint32_t minimum_size);
- V8_EXPORT_PRIVATE void SetRawMemory(byte* mem_start, size_t mem_size);
-
- // Get the debug info associated with the given wasm object.
- // If no debug info exists yet, it is created automatically.
- V8_EXPORT_PRIVATE static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
- Handle<WasmInstanceObject>);
+ void SetRawMemory(byte* mem_start, size_t mem_size);
- V8_EXPORT_PRIVATE static Handle<WasmInstanceObject> New(
- Isolate*, Handle<WasmModuleObject>);
+ static Handle<WasmInstanceObject> New(Isolate*, Handle<WasmModuleObject>);
Address GetCallTarget(uint32_t func_index);
@@ -536,10 +524,9 @@ class WasmInstanceObject : public JSObject {
// cache of the given {instance}, or creates a new {WasmExportedFunction} if
// it does not exist yet. The new {WasmExportedFunction} is added to the
// cache of the {instance} immediately.
- V8_EXPORT_PRIVATE static Handle<WasmExternalFunction>
- GetOrCreateWasmExternalFunction(Isolate* isolate,
- Handle<WasmInstanceObject> instance,
- int function_index);
+ static Handle<WasmExternalFunction> GetOrCreateWasmExternalFunction(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ int function_index);
static void SetWasmExternalFunction(Isolate* isolate,
Handle<WasmInstanceObject> instance,
@@ -578,6 +565,11 @@ class WasmInstanceObject : public JSObject {
Handle<WasmInstanceObject>,
uint32_t memory_index);
+ // Get the name of a table in the given instance by index.
+ static MaybeHandle<String> GetTableNameOrNull(Isolate*,
+ Handle<WasmInstanceObject>,
+ uint32_t table_index);
+
OBJECT_CONSTRUCTORS(WasmInstanceObject, JSObject);
private:
@@ -619,7 +611,7 @@ class WasmExceptionObject : public JSObject {
};
// A Wasm exception that has been thrown out of Wasm code.
-class WasmExceptionPackage : public JSReceiver {
+class V8_EXPORT_PRIVATE WasmExceptionPackage : public JSReceiver {
public:
static Handle<WasmExceptionPackage> New(
Isolate* isolate, Handle<WasmExceptionTag> exception_tag,
@@ -812,42 +804,6 @@ class WasmJSFunctionData : public Struct {
OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
};
-// Debug info used for wasm debugging in the interpreter. For Liftoff debugging,
-// all information is held off-heap in {wasm::DebugInfo}.
-class WasmDebugInfo : public Struct {
- public:
- NEVER_READ_ONLY_SPACE
- DECL_ACCESSORS(wasm_instance, WasmInstanceObject)
- DECL_ACCESSORS(interpreter_handle, Object) // Foreign or undefined
- DECL_ACCESSORS(interpreter_reference_stack, Cell)
- DECL_OPTIONAL_ACCESSORS(c_wasm_entries, FixedArray)
- DECL_OPTIONAL_ACCESSORS(c_wasm_entry_map, Managed<wasm::SignatureMap>)
-
- DECL_CAST(WasmDebugInfo)
-
- // Dispatched behavior.
- DECL_PRINTER(WasmDebugInfo)
- DECL_VERIFIER(WasmDebugInfo)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_WASM_DEBUG_INFO_FIELDS)
-
- static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
-
- // Setup a WasmDebugInfo with an existing WasmInstance struct.
- // Returns a pointer to the interpreter instantiated inside this
- // WasmDebugInfo.
- // Use for testing only.
- V8_EXPORT_PRIVATE static wasm::WasmInterpreter* SetupForTesting(
- Handle<WasmInstanceObject>);
-
- V8_EXPORT_PRIVATE static Handle<Code> GetCWasmEntry(Handle<WasmDebugInfo>,
- const wasm::FunctionSig*);
-
- OBJECT_CONSTRUCTORS(WasmDebugInfo, Struct);
-};
-
class WasmScript : public AllStatic {
public:
// Set a breakpoint on the given byte position inside the given module.
diff --git a/chromium/v8/src/wasm/wasm-objects.tq b/chromium/v8/src/wasm/wasm-objects.tq
index e611ced16ef..cbaa35b47d9 100644
--- a/chromium/v8/src/wasm/wasm-objects.tq
+++ b/chromium/v8/src/wasm/wasm-objects.tq
@@ -41,14 +41,6 @@ extern class WasmIndirectFunctionTable extends Struct {
refs: FixedArray;
}
-extern class WasmDebugInfo extends Struct {
- instance: WasmInstanceObject;
- interpreter_handle: Foreign|Undefined;
- interpreter_reference_stack: Cell;
- c_wasm_entries: FixedArray|Undefined;
- c_wasm_entry_map: Foreign|Undefined; // Managed<wasm::SignatureMap>
-}
-
@generateCppClass
extern class WasmExceptionTag extends Struct {
// Note that this index is only useful for debugging purposes and it is not
@@ -78,16 +70,13 @@ extern class WasmMemoryObject extends JSObject {
}
type WasmValueType extends uint8 constexpr 'wasm::ValueType::Kind';
-bitfield struct WasmGlobalObjectFlags extends uint31 {
- Type: WasmValueType: 8 bit; // "type" is a reserved word.
- is_mutable: bool: 1 bit;
-}
extern class WasmGlobalObject extends JSObject {
untagged_buffer: JSArrayBuffer|Undefined;
tagged_buffer: FixedArray|Undefined;
offset: Smi;
- flags: SmiTagged<WasmGlobalObjectFlags>;
+ raw_type: Smi;
+ is_mutable: Smi;
}
extern class WasmExceptionObject extends JSObject {
diff --git a/chromium/v8/src/wasm/wasm-opcodes-inl.h b/chromium/v8/src/wasm/wasm-opcodes-inl.h
new file mode 100644
index 00000000000..2d9268a9bc7
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-opcodes-inl.h
@@ -0,0 +1,631 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_OPCODES_INL_H_
+#define V8_WASM_WASM_OPCODES_INL_H_
+
+#include <array>
+
+#include "src/base/template-utils.h"
+#include "src/codegen/signature.h"
+#include "src/execution/messages.h"
+#include "src/runtime/runtime.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define CASE_OP(name, str) \
+ case kExpr##name: \
+ return str;
+#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
+#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
+#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
+#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
+#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
+#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
+#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
+#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
+#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
+#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
+#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
+#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
+#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
+#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_V64x2_OP(name, str) CASE_OP(V64x2##name, "v64x2." str)
+#define CASE_V32x4_OP(name, str) CASE_OP(V32x4##name, "v32x4." str)
+#define CASE_V16x8_OP(name, str) CASE_OP(V16x8##name, "v16x8." str)
+#define CASE_V8x16_OP(name, str) CASE_OP(V8x16##name, "v8x16." str)
+#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
+#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
+#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
+#define CASE_SIMD_OP(name, str) \
+ CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+ CASE_I8x16_OP(name, str)
+#define CASE_SIMDF_OP(name, str) \
+ CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
+#define CASE_SIMDI_OP(name, str) \
+ CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
+#define CASE_SIMDV_OP(name, str) \
+ CASE_V32x4_OP(name, str) CASE_V16x8_OP(name, str) CASE_V8x16_OP(name, str)
+#define CASE_SIGN_OP(TYPE, name, str) \
+ CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_ALL_SIGN_OP(name, str) \
+ CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
+#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
+#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
+ CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
+#define CASE_L32_OP(name, str) \
+ CASE_SIGN_OP(I32, name##8, str "8") \
+ CASE_SIGN_OP(I32, name##16, str "16") \
+ CASE_I32_OP(name, str "32")
+#define CASE_U32_OP(name, str) \
+ CASE_I32_OP(name, str "32") \
+ CASE_UNSIGNED_OP(I32, name##8, str "8") \
+ CASE_UNSIGNED_OP(I32, name##16, str "16")
+#define CASE_UNSIGNED_ALL_OP(name, str) \
+ CASE_U32_OP(name, str) \
+ CASE_I64_OP(name, str "64") \
+ CASE_UNSIGNED_OP(I64, name##8, str "8") \
+ CASE_UNSIGNED_OP(I64, name##16, str "16") \
+ CASE_UNSIGNED_OP(I64, name##32, str "32")
+
+// static
+constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+ // clang-format off
+
+ // Standard opcodes
+ CASE_INT_OP(Eqz, "eqz")
+ CASE_ALL_OP(Eq, "eq")
+ CASE_ALL_OP(Ne, "ne")
+ CASE_ALL_OP(Add, "add")
+ CASE_ALL_OP(Sub, "sub")
+ CASE_ALL_OP(Mul, "mul")
+ CASE_ALL_SIGN_OP(Lt, "lt")
+ CASE_ALL_SIGN_OP(Gt, "gt")
+ CASE_ALL_SIGN_OP(Le, "le")
+ CASE_ALL_SIGN_OP(Ge, "ge")
+ CASE_INT_OP(Clz, "clz")
+ CASE_INT_OP(Ctz, "ctz")
+ CASE_INT_OP(Popcnt, "popcnt")
+ CASE_ALL_SIGN_OP(Div, "div")
+ CASE_SIGN_OP(INT, Rem, "rem")
+ CASE_INT_OP(And, "and")
+ CASE_INT_OP(Ior, "or")
+ CASE_INT_OP(Xor, "xor")
+ CASE_INT_OP(Shl, "shl")
+ CASE_SIGN_OP(INT, Shr, "shr")
+ CASE_INT_OP(Rol, "rol")
+ CASE_INT_OP(Ror, "ror")
+ CASE_FLOAT_OP(Abs, "abs")
+ CASE_FLOAT_OP(Neg, "neg")
+ CASE_FLOAT_OP(Ceil, "ceil")
+ CASE_FLOAT_OP(Floor, "floor")
+ CASE_FLOAT_OP(Trunc, "trunc")
+ CASE_FLOAT_OP(NearestInt, "nearest")
+ CASE_FLOAT_OP(Sqrt, "sqrt")
+ CASE_FLOAT_OP(Min, "min")
+ CASE_FLOAT_OP(Max, "max")
+ CASE_FLOAT_OP(CopySign, "copysign")
+ CASE_REF_OP(Null, "null")
+ CASE_REF_OP(IsNull, "is_null")
+ CASE_REF_OP(Func, "func")
+ CASE_REF_OP(AsNonNull, "as_non_null")
+ CASE_I32_OP(ConvertI64, "wrap_i64")
+ CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
+ CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
+ CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
+ CASE_F32_OP(ConvertF64, "demote_f64")
+ CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
+ CASE_F64_OP(ConvertF32, "promote_f32")
+ CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
+ CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
+ CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
+ CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
+ CASE_INT_OP(SExtendI8, "extend8_s")
+ CASE_INT_OP(SExtendI16, "extend16_s")
+ CASE_I64_OP(SExtendI32, "extend32_s")
+ CASE_OP(Unreachable, "unreachable")
+ CASE_OP(Nop, "nop")
+ CASE_OP(Block, "block")
+ CASE_OP(Loop, "loop")
+ CASE_OP(If, "if")
+ CASE_OP(Else, "else")
+ CASE_OP(End, "end")
+ CASE_OP(Br, "br")
+ CASE_OP(BrIf, "br_if")
+ CASE_OP(BrTable, "br_table")
+ CASE_OP(Return, "return")
+ CASE_OP(CallFunction, "call")
+ CASE_OP(CallIndirect, "call_indirect")
+ CASE_OP(ReturnCall, "return_call")
+ CASE_OP(ReturnCallIndirect, "return_call_indirect")
+ CASE_OP(BrOnNull, "br_on_null")
+ CASE_OP(Drop, "drop")
+ CASE_OP(Select, "select")
+ CASE_OP(SelectWithType, "select")
+ CASE_OP(LocalGet, "local.get")
+ CASE_OP(LocalSet, "local.set")
+ CASE_OP(LocalTee, "local.tee")
+ CASE_OP(GlobalGet, "global.get")
+ CASE_OP(GlobalSet, "global.set")
+ CASE_OP(TableGet, "table.get")
+ CASE_OP(TableSet, "table.set")
+ CASE_ALL_OP(Const, "const")
+ CASE_OP(MemorySize, "memory.size")
+ CASE_OP(MemoryGrow, "memory.grow")
+ CASE_ALL_OP(LoadMem, "load")
+ CASE_SIGN_OP(INT, LoadMem8, "load8")
+ CASE_SIGN_OP(INT, LoadMem16, "load16")
+ CASE_SIGN_OP(I64, LoadMem32, "load32")
+ CASE_S128_OP(LoadMem, "load128")
+ CASE_ALL_OP(StoreMem, "store")
+ CASE_INT_OP(StoreMem8, "store8")
+ CASE_INT_OP(StoreMem16, "store16")
+ CASE_I64_OP(StoreMem32, "store32")
+ CASE_S128_OP(StoreMem, "store128")
+
+ // Exception handling opcodes.
+ CASE_OP(Try, "try")
+ CASE_OP(Catch, "catch")
+ CASE_OP(Throw, "throw")
+ CASE_OP(Rethrow, "rethrow")
+ CASE_OP(BrOnExn, "br_on_exn")
+
+ // asm.js-only opcodes.
+ CASE_F64_OP(Acos, "acos")
+ CASE_F64_OP(Asin, "asin")
+ CASE_F64_OP(Atan, "atan")
+ CASE_F64_OP(Cos, "cos")
+ CASE_F64_OP(Sin, "sin")
+ CASE_F64_OP(Tan, "tan")
+ CASE_F64_OP(Exp, "exp")
+ CASE_F64_OP(Log, "log")
+ CASE_F64_OP(Atan2, "atan2")
+ CASE_F64_OP(Pow, "pow")
+ CASE_F64_OP(Mod, "mod")
+ CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
+ CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
+ CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
+ CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
+ CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
+ CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
+ CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
+ CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
+ CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
+ CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
+
+ // Numeric Opcodes.
+ CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
+ CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
+ CASE_OP(MemoryInit, "memory.init")
+ CASE_OP(DataDrop, "data.drop")
+ CASE_OP(MemoryCopy, "memory.copy")
+ CASE_OP(MemoryFill, "memory.fill")
+ CASE_OP(TableInit, "table.init")
+ CASE_OP(ElemDrop, "elem.drop")
+ CASE_OP(TableCopy, "table.copy")
+ CASE_OP(TableGrow, "table.grow")
+ CASE_OP(TableSize, "table.size")
+ CASE_OP(TableFill, "table.fill")
+
+ // SIMD opcodes.
+ CASE_SIMD_OP(Splat, "splat")
+ CASE_SIMD_OP(Neg, "neg")
+ CASE_SIMDF_OP(Sqrt, "sqrt")
+ CASE_SIMD_OP(Eq, "eq")
+ CASE_SIMD_OP(Ne, "ne")
+ CASE_SIMD_OP(Add, "add")
+ CASE_SIMD_OP(Sub, "sub")
+ CASE_SIMD_OP(Mul, "mul")
+ CASE_SIMDF_OP(Div, "div")
+ CASE_SIMDF_OP(Lt, "lt")
+ CASE_SIMDF_OP(Le, "le")
+ CASE_SIMDF_OP(Gt, "gt")
+ CASE_SIMDF_OP(Ge, "ge")
+ CASE_SIMDF_OP(Abs, "abs")
+ CASE_F32x4_OP(AddHoriz, "add_horizontal")
+ CASE_F32x4_OP(RecipApprox, "recip_approx")
+ CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
+ CASE_SIMDF_OP(Min, "min")
+ CASE_SIMDF_OP(Max, "max")
+ CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
+ CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
+ CASE_SIMDF_OP(ExtractLane, "extract_lane")
+ CASE_SIMDF_OP(ReplaceLane, "replace_lane")
+ CASE_I64x2_OP(ExtractLane, "extract_lane")
+ CASE_I64x2_OP(ReplaceLane, "replace_lane")
+ CASE_I32x4_OP(ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
+ CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
+ CASE_SIMDI_OP(ReplaceLane, "replace_lane")
+ CASE_SIGN_OP(SIMDI, Min, "min")
+ CASE_SIGN_OP(I64x2, Min, "min")
+ CASE_SIGN_OP(SIMDI, Max, "max")
+ CASE_SIGN_OP(I64x2, Max, "max")
+ CASE_SIGN_OP(SIMDI, Lt, "lt")
+ CASE_SIGN_OP(I64x2, Lt, "lt")
+ CASE_SIGN_OP(SIMDI, Le, "le")
+ CASE_SIGN_OP(I64x2, Le, "le")
+ CASE_SIGN_OP(SIMDI, Gt, "gt")
+ CASE_SIGN_OP(I64x2, Gt, "gt")
+ CASE_SIGN_OP(SIMDI, Ge, "ge")
+ CASE_SIGN_OP(I64x2, Ge, "ge")
+ CASE_SIGN_OP(SIMDI, Shr, "shr")
+ CASE_SIGN_OP(I64x2, Shr, "shr")
+ CASE_SIMDI_OP(Shl, "shl")
+ CASE_I64x2_OP(Shl, "shl")
+ CASE_I32x4_OP(AddHoriz, "add_horizontal")
+ CASE_I16x8_OP(AddHoriz, "add_horizontal")
+ CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
+ CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
+ CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+ CASE_S128_OP(And, "and")
+ CASE_S128_OP(Or, "or")
+ CASE_S128_OP(Xor, "xor")
+ CASE_S128_OP(Not, "not")
+ CASE_S128_OP(Select, "select")
+ CASE_S128_OP(AndNot, "andnot")
+ CASE_S8x16_OP(Swizzle, "swizzle")
+ CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_SIMDV_OP(AnyTrue, "any_true")
+ CASE_SIMDV_OP(AllTrue, "all_true")
+ CASE_V64x2_OP(AnyTrue, "any_true")
+ CASE_V64x2_OP(AllTrue, "all_true")
+ CASE_SIMDF_OP(Qfma, "qfma")
+ CASE_SIMDF_OP(Qfms, "qfms")
+
+ CASE_S8x16_OP(LoadSplat, "load_splat")
+ CASE_S16x8_OP(LoadSplat, "load_splat")
+ CASE_S32x4_OP(LoadSplat, "load_splat")
+ CASE_S64x2_OP(LoadSplat, "load_splat")
+ CASE_I16x8_OP(Load8x8S, "load8x8_s")
+ CASE_I16x8_OP(Load8x8U, "load8x8_u")
+ CASE_I32x4_OP(Load16x4S, "load16x4_s")
+ CASE_I32x4_OP(Load16x4U, "load16x4_u")
+ CASE_I64x2_OP(Load32x2S, "load32x2_s")
+ CASE_I64x2_OP(Load32x2U, "load32x2_u")
+
+ CASE_I8x16_OP(RoundingAverageU, "avgr_u")
+ CASE_I16x8_OP(RoundingAverageU, "avgr_u")
+
+ CASE_I8x16_OP(Abs, "abs")
+ CASE_I16x8_OP(Abs, "abs")
+ CASE_I32x4_OP(Abs, "abs")
+
+ CASE_I8x16_OP(BitMask, "bitmask")
+ CASE_I16x8_OP(BitMask, "bitmask")
+ CASE_I32x4_OP(BitMask, "bitmask")
+
+ CASE_F32x4_OP(Pmin, "pmin")
+ CASE_F32x4_OP(Pmax, "pmax")
+ CASE_F64x2_OP(Pmin, "pmin")
+ CASE_F64x2_OP(Pmax, "pmax")
+
+ CASE_F32x4_OP(Ceil, "ceil")
+ CASE_F32x4_OP(Floor, "floor")
+ CASE_F32x4_OP(Trunc, "trunc")
+ CASE_F32x4_OP(NearestInt, "nearest")
+ CASE_F64x2_OP(Ceil, "ceil")
+ CASE_F64x2_OP(Floor, "floor")
+ CASE_F64x2_OP(Trunc, "trunc")
+ CASE_F64x2_OP(NearestInt, "nearest")
+
+ CASE_I32x4_OP(DotI16x8S, "dot_i16x8_s")
+
+ // Atomic operations.
+ CASE_OP(AtomicNotify, "atomic.notify")
+ CASE_INT_OP(AtomicWait, "atomic.wait")
+ CASE_OP(AtomicFence, "atomic.fence")
+ CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
+ CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
+ CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
+ CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic.sub")
+ CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic.and")
+ CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic.or")
+ CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic.xor")
+ CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
+ CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
+
+ // GC operations.
+ CASE_OP(StructNew, "struct.new")
+ CASE_OP(StructNewSub, "struct.new_sub")
+ CASE_OP(StructNewDefault, "struct.new_default")
+ CASE_OP(StructGet, "struct.get")
+ CASE_OP(StructGetS, "struct.get_s")
+ CASE_OP(StructGetU, "struct.get_u")
+ CASE_OP(StructSet, "struct.set")
+ CASE_OP(ArrayNew, "array.new")
+ CASE_OP(ArrayNewSub, "array.new_sub")
+ CASE_OP(ArrayNewDefault, "array.new_default")
+ CASE_OP(ArrayGet, "array.get")
+ CASE_OP(ArrayGetS, "array.get_s")
+ CASE_OP(ArrayGetU, "array.get_u")
+ CASE_OP(ArrayLen, "array.len")
+ CASE_OP(ArraySet, "array.set")
+ CASE_OP(I31New, "i31.new")
+ CASE_OP(I31GetS, "i31.get_s")
+ CASE_OP(I31GetU, "i31.get_u")
+ CASE_OP(RttCanon, "rtt.canon")
+ CASE_OP(RttSub, "rtt.sub")
+ CASE_OP(RefTest, "ref.test")
+ CASE_OP(RefCast, "ref.cast")
+ CASE_OP(BrOnCast, "br_on_cast")
+ CASE_OP(RefEq, "ref.eq")
+ CASE_OP(Let, "let")
+
+
+ case kNumericPrefix:
+ case kSimdPrefix:
+ case kAtomicPrefix:
+ case kGCPrefix:
+ return "unknown";
+ // clang-format on
+ }
+ // Even though the switch above handles all well-defined enum values,
+ // random modules (e.g. fuzzer generated) can call this function with
+ // random (invalid) opcodes. Handle those here:
+ return "invalid opcode";
+}
+
+#undef CASE_OP
+#undef CASE_I32_OP
+#undef CASE_I64_OP
+#undef CASE_F32_OP
+#undef CASE_F64_OP
+#undef CASE_REF_OP
+#undef CASE_F64x2_OP
+#undef CASE_F32x4_OP
+#undef CASE_I64x2_OP
+#undef CASE_I32x4_OP
+#undef CASE_I16x8_OP
+#undef CASE_I8x16_OP
+#undef CASE_S128_OP
+#undef CASE_S64x2_OP
+#undef CASE_S32x4_OP
+#undef CASE_S16x8_OP
+#undef CASE_S8x16_OP
+#undef CASE_INT_OP
+#undef CASE_FLOAT_OP
+#undef CASE_ALL_OP
+#undef CASE_SIMD_OP
+#undef CASE_SIMDI_OP
+#undef CASE_SIGN_OP
+#undef CASE_UNSIGNED_OP
+#undef CASE_UNSIGNED_ALL_OP
+#undef CASE_ALL_SIGN_OP
+#undef CASE_CONVERT_OP
+#undef CASE_CONVERT_SAT_OP
+#undef CASE_L32_OP
+#undef CASE_U32_OP
+
+// static
+constexpr bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
+ FOREACH_PREFIX(CHECK_PREFIX)
+#undef CHECK_PREFIX
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
+ FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprUnreachable:
+ case kExprBr:
+ case kExprBrTable:
+ case kExprReturn:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsBreakable(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprBlock:
+ case kExprTry:
+ case kExprCatch:
+ case kExprLoop:
+ case kExprElse:
+ return false;
+ default:
+ return true;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsExternRefOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprRefNull:
+ case kExprRefIsNull:
+ case kExprRefFunc:
+ case kExprRefAsNonNull:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
+ // TODO(8729): Trapping opcodes are not yet considered to be throwing.
+ switch (opcode) {
+ case kExprThrow:
+ case kExprRethrow:
+ case kExprCallFunction:
+ case kExprCallIndirect:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// static
+constexpr bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
+ FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ return true;
+ default:
+ return false;
+ }
+}
+
+namespace impl {
+
+#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
+enum WasmOpcodeSig : byte {
+ kSigEnum_None,
+ FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
+};
+#undef DECLARE_SIG_ENUM
+#define DECLARE_SIG(name, ...) \
+ constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
+ constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
+ constexpr FunctionSig kSig_##name( \
+ kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
+ kTypes_##name + (1 - kReturnsCount_##name));
+FOREACH_SIGNATURE(DECLARE_SIG)
+#undef DECLARE_SIG
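For reference, a single FOREACH_SIGNATURE entry such as V(i_f, kWasmI32, kWasmF32) expands roughly as sketched below (illustrative only; the convention is that the first array element is the return type, or kWasmStmt for a no-result signature, in which case the sentinel is skipped and every remaining element counts as a parameter):

  constexpr ValueType kTypes_i_f[] = {kWasmI32, kWasmF32};
  constexpr int kReturnsCount_i_f = kTypes_i_f[0] == kWasmStmt ? 0 : 1;  // 1 return
  constexpr FunctionSig kSig_i_f(
      kReturnsCount_i_f,                            // return count: 1
      static_cast<int>(arraysize(kTypes_i_f)) - 1,  // parameter count: 1
      kTypes_i_f + (1 - kReturnsCount_i_f));        // reps: return first, then params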
+
+#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
+constexpr const FunctionSig* kCachedSigs[] = {
+ nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
+#undef DECLARE_SIG_ENTRY
+
+constexpr WasmOpcodeSig GetShortOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
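The chained ?: form keeps the whole lookup a single return statement, so it works as a constant expression even without relaxed constexpr. A minimal standalone sketch of the same pattern, with made-up opcodes and signature ids (not the real V8 tables), is:

  #include <cstdint>

  enum DemoSigId : uint8_t { kDemoNone, kDemo_i_ii, kDemo_f_ff };

  // Hypothetical opcode list, mirroring the FOREACH_* X-macro shape.
  #define FOREACH_DEMO_OPCODE(V) \
    V(AddI32, 0x6a, i_ii)        \
    V(AddF32, 0x92, f_ff)

  constexpr DemoSigId DemoSigIndex(uint8_t opcode) {
  #define CASE(name, opc, sig) opcode == opc ? kDemo_##sig :
    // Expands to: opcode == 0x6a ? kDemo_i_ii : opcode == 0x92 ? kDemo_f_ff : kDemoNone;
    return FOREACH_DEMO_OPCODE(CASE) kDemoNone;
  #undef CASE
  }

  static_assert(DemoSigIndex(0x6a) == kDemo_i_ii, "matching entry is selected");
  static_assert(DemoSigIndex(0x00) == kDemoNone, "unknown opcodes fall through");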
+
+constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
+ return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
+ kSigEnum_None;
+#undef CASE
+}
+
+constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
+#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
+ return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
+#undef CASE
+}
+
+constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
+ base::make_array<256>(GetShortOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
+ base::make_array<256>(GetAsmJsOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
+ base::make_array<256>(GetSimdOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
+ base::make_array<256>(GetAtomicOpcodeSigIndex);
+constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
+ base::make_array<256>(GetNumericOpcodeSigIndex);
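base::make_array evaluates the generator once per index at compile time, so each 256-entry table is baked into the binary and a signature lookup at runtime is a plain array index. A sketch of how such a helper can be written (the real one lives in src/base/template-utils.h and may differ in detail):

  #include <array>
  #include <cstddef>
  #include <utility>

  template <std::size_t N, typename F, std::size_t... I>
  constexpr auto make_array_impl(F f, std::index_sequence<I...>)
      -> std::array<decltype(f(std::size_t{0})), N> {
    return {{f(I)...}};  // call the generator for every index 0..N-1
  }

  template <std::size_t N, typename F>
  constexpr auto make_array(F f) {
    return make_array_impl<N>(f, std::make_index_sequence<N>{});
  }

  // Usage mirroring the tables above: 256 entries, all computed at compile time.
  constexpr int Twice(std::size_t i) { return static_cast<int>(2 * i); }
  constexpr std::array<int, 256> kDoubled = make_array<256>(Twice);
  static_assert(kDoubled[21] == 42, "precomputed");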
+
+} // namespace impl
+
+constexpr const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
+ switch (opcode >> 8) {
+ case 0:
+ return impl::kCachedSigs[impl::kShortSigTable[opcode]];
+ case kSimdPrefix:
+ return impl::kCachedSigs[impl::kSimdExprSigTable[opcode & 0xFF]];
+ case kAtomicPrefix:
+ return impl::kCachedSigs[impl::kAtomicExprSigTable[opcode & 0xFF]];
+ case kNumericPrefix:
+ return impl::kCachedSigs[impl::kNumericExprSigTable[opcode & 0xFF]];
+ default:
+#if V8_HAS_CXX14_CONSTEXPR
+ UNREACHABLE(); // invalid prefix.
+#else
+ return nullptr;
+#endif
+ }
+}
+
+constexpr const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
+ CONSTEXPR_DCHECK(opcode < impl::kSimpleAsmjsExprSigTable.size());
+ return impl::kCachedSigs[impl::kSimpleAsmjsExprSigTable[opcode]];
+}
+
+constexpr MessageTemplate WasmOpcodes::TrapReasonToMessageId(
+ TrapReason reason) {
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case k##name: \
+ return MessageTemplate::kWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+ default:
+ return MessageTemplate::kNone;
+ }
+}
+
+const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
+ return MessageFormatter::TemplateString(TrapReasonToMessageId(reason));
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_OPCODES_INL_H_
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
index 53869e86a58..1bf29e241ee 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.cc
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -6,500 +6,14 @@
#include <array>
-#include "src/base/template-utils.h"
#include "src/codegen/signature.h"
-#include "src/execution/messages.h"
-#include "src/runtime/runtime.h"
#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
namespace wasm {
-#define CASE_OP(name, str) \
- case kExpr##name: \
- return str;
-#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
-#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
-#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
-#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
-#define CASE_REF_OP(name, str) CASE_OP(Ref##name, "ref." str)
-#define CASE_F64x2_OP(name, str) CASE_OP(F64x2##name, "f64x2." str)
-#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
-#define CASE_I64x2_OP(name, str) CASE_OP(I64x2##name, "i64x2." str)
-#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
-#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
-#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
-#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
-#define CASE_S64x2_OP(name, str) CASE_OP(S64x2##name, "s64x2." str)
-#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
-#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
-#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
-#define CASE_S1x2_OP(name, str) CASE_OP(S1x2##name, "s1x2." str)
-#define CASE_S1x4_OP(name, str) CASE_OP(S1x4##name, "s1x4." str)
-#define CASE_S1x8_OP(name, str) CASE_OP(S1x8##name, "s1x8." str)
-#define CASE_S1x16_OP(name, str) CASE_OP(S1x16##name, "s1x16." str)
-#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
-#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
-#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
-#define CASE_SIMD_OP(name, str) \
- CASE_F64x2_OP(name, str) CASE_I64x2_OP(name, str) CASE_F32x4_OP(name, str) \
- CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
- CASE_I8x16_OP(name, str)
-#define CASE_SIMDF_OP(name, str) \
- CASE_F32x4_OP(name, str) CASE_F64x2_OP(name, str)
-#define CASE_SIMDI_OP(name, str) \
- CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
-#define CASE_SIGN_OP(TYPE, name, str) \
- CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
-#define CASE_UNSIGNED_OP(TYPE, name, str) CASE_##TYPE##_OP(name##U, str "_u")
-#define CASE_ALL_SIGN_OP(name, str) \
- CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
-#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##SRC, str "_" src_suffix "_u") \
- CASE_##RES##_OP(S##name##SRC, str "_" src_suffix "_s")
-#define CASE_CONVERT_SAT_OP(name, RES, SRC, src_suffix, str) \
- CASE_##RES##_OP(U##name##Sat##SRC, str "_sat_" src_suffix "_u") \
- CASE_##RES##_OP(S##name##Sat##SRC, str "_sat_" src_suffix "_s")
-#define CASE_L32_OP(name, str) \
- CASE_SIGN_OP(I32, name##8, str "8") \
- CASE_SIGN_OP(I32, name##16, str "16") \
- CASE_I32_OP(name, str "32")
-#define CASE_U32_OP(name, str) \
- CASE_I32_OP(name, str "32") \
- CASE_UNSIGNED_OP(I32, name##8, str "8") \
- CASE_UNSIGNED_OP(I32, name##16, str "16")
-#define CASE_UNSIGNED_ALL_OP(name, str) \
- CASE_U32_OP(name, str) \
- CASE_I64_OP(name, str "64") \
- CASE_UNSIGNED_OP(I64, name##8, str "8") \
- CASE_UNSIGNED_OP(I64, name##16, str "16") \
- CASE_UNSIGNED_OP(I64, name##32, str "32")
-
-const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
- switch (opcode) {
- // clang-format off
-
- // Standard opcodes
- CASE_INT_OP(Eqz, "eqz")
- CASE_ALL_OP(Eq, "eq")
- CASE_ALL_OP(Ne, "ne")
- CASE_ALL_OP(Add, "add")
- CASE_ALL_OP(Sub, "sub")
- CASE_ALL_OP(Mul, "mul")
- CASE_ALL_SIGN_OP(Lt, "lt")
- CASE_ALL_SIGN_OP(Gt, "gt")
- CASE_ALL_SIGN_OP(Le, "le")
- CASE_ALL_SIGN_OP(Ge, "ge")
- CASE_INT_OP(Clz, "clz")
- CASE_INT_OP(Ctz, "ctz")
- CASE_INT_OP(Popcnt, "popcnt")
- CASE_ALL_SIGN_OP(Div, "div")
- CASE_SIGN_OP(INT, Rem, "rem")
- CASE_INT_OP(And, "and")
- CASE_INT_OP(Ior, "or")
- CASE_INT_OP(Xor, "xor")
- CASE_INT_OP(Shl, "shl")
- CASE_SIGN_OP(INT, Shr, "shr")
- CASE_INT_OP(Rol, "rol")
- CASE_INT_OP(Ror, "ror")
- CASE_FLOAT_OP(Abs, "abs")
- CASE_FLOAT_OP(Neg, "neg")
- CASE_FLOAT_OP(Ceil, "ceil")
- CASE_FLOAT_OP(Floor, "floor")
- CASE_FLOAT_OP(Trunc, "trunc")
- CASE_FLOAT_OP(NearestInt, "nearest")
- CASE_FLOAT_OP(Sqrt, "sqrt")
- CASE_FLOAT_OP(Min, "min")
- CASE_FLOAT_OP(Max, "max")
- CASE_FLOAT_OP(CopySign, "copysign")
- CASE_REF_OP(Null, "null")
- CASE_REF_OP(IsNull, "is_null")
- CASE_REF_OP(Func, "func")
- CASE_REF_OP(AsNonNull, "as_non_null")
- CASE_I32_OP(ConvertI64, "wrap_i64")
- CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
- CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
- CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
- CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
- CASE_F32_OP(ConvertF64, "demote_f64")
- CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
- CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
- CASE_F64_OP(ConvertF32, "promote_f32")
- CASE_I32_OP(ReinterpretF32, "reinterpret_f32")
- CASE_I64_OP(ReinterpretF64, "reinterpret_f64")
- CASE_F32_OP(ReinterpretI32, "reinterpret_i32")
- CASE_F64_OP(ReinterpretI64, "reinterpret_i64")
- CASE_INT_OP(SExtendI8, "extend8_s")
- CASE_INT_OP(SExtendI16, "extend16_s")
- CASE_I64_OP(SExtendI32, "extend32_s")
- CASE_OP(Unreachable, "unreachable")
- CASE_OP(Nop, "nop")
- CASE_OP(Block, "block")
- CASE_OP(Loop, "loop")
- CASE_OP(If, "if")
- CASE_OP(Else, "else")
- CASE_OP(End, "end")
- CASE_OP(Br, "br")
- CASE_OP(BrIf, "br_if")
- CASE_OP(BrTable, "br_table")
- CASE_OP(Return, "return")
- CASE_OP(CallFunction, "call")
- CASE_OP(CallIndirect, "call_indirect")
- CASE_OP(ReturnCall, "return_call")
- CASE_OP(ReturnCallIndirect, "return_call_indirect")
- CASE_OP(BrOnNull, "br_on_null")
- CASE_OP(Drop, "drop")
- CASE_OP(Select, "select")
- CASE_OP(SelectWithType, "select")
- CASE_OP(LocalGet, "local.get")
- CASE_OP(LocalSet, "local.set")
- CASE_OP(LocalTee, "local.tee")
- CASE_OP(GlobalGet, "global.get")
- CASE_OP(GlobalSet, "global.set")
- CASE_OP(TableGet, "table.get")
- CASE_OP(TableSet, "table.set")
- CASE_ALL_OP(Const, "const")
- CASE_OP(MemorySize, "memory.size")
- CASE_OP(MemoryGrow, "memory.grow")
- CASE_ALL_OP(LoadMem, "load")
- CASE_SIGN_OP(INT, LoadMem8, "load8")
- CASE_SIGN_OP(INT, LoadMem16, "load16")
- CASE_SIGN_OP(I64, LoadMem32, "load32")
- CASE_S128_OP(LoadMem, "load128")
- CASE_ALL_OP(StoreMem, "store")
- CASE_INT_OP(StoreMem8, "store8")
- CASE_INT_OP(StoreMem16, "store16")
- CASE_I64_OP(StoreMem32, "store32")
- CASE_S128_OP(StoreMem, "store128")
-
- // Exception handling opcodes.
- CASE_OP(Try, "try")
- CASE_OP(Catch, "catch")
- CASE_OP(Throw, "throw")
- CASE_OP(Rethrow, "rethrow")
- CASE_OP(BrOnExn, "br_on_exn")
-
- // asm.js-only opcodes.
- CASE_F64_OP(Acos, "acos")
- CASE_F64_OP(Asin, "asin")
- CASE_F64_OP(Atan, "atan")
- CASE_F64_OP(Cos, "cos")
- CASE_F64_OP(Sin, "sin")
- CASE_F64_OP(Tan, "tan")
- CASE_F64_OP(Exp, "exp")
- CASE_F64_OP(Log, "log")
- CASE_F64_OP(Atan2, "atan2")
- CASE_F64_OP(Pow, "pow")
- CASE_F64_OP(Mod, "mod")
- CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
- CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
- CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
- CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
- CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
- CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
- CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
- CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
- CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
- CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
- CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_f32_s")
- CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_f32_u")
- CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_f64_s")
- CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_f64_u")
-
- // Numeric Opcodes.
- CASE_CONVERT_SAT_OP(Convert, I32, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I32, F64, "f64", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F32, "f32", "trunc")
- CASE_CONVERT_SAT_OP(Convert, I64, F64, "f64", "trunc")
- CASE_OP(MemoryInit, "memory.init")
- CASE_OP(DataDrop, "data.drop")
- CASE_OP(MemoryCopy, "memory.copy")
- CASE_OP(MemoryFill, "memory.fill")
- CASE_OP(TableInit, "table.init")
- CASE_OP(ElemDrop, "elem.drop")
- CASE_OP(TableCopy, "table.copy")
- CASE_OP(TableGrow, "table.grow")
- CASE_OP(TableSize, "table.size")
- CASE_OP(TableFill, "table.fill")
-
- // SIMD opcodes.
- CASE_SIMD_OP(Splat, "splat")
- CASE_SIMD_OP(Neg, "neg")
- CASE_SIMDF_OP(Sqrt, "sqrt")
- CASE_SIMD_OP(Eq, "eq")
- CASE_SIMD_OP(Ne, "ne")
- CASE_SIMD_OP(Add, "add")
- CASE_SIMD_OP(Sub, "sub")
- CASE_SIMD_OP(Mul, "mul")
- CASE_SIMDF_OP(Div, "div")
- CASE_SIMDF_OP(Lt, "lt")
- CASE_SIMDF_OP(Le, "le")
- CASE_SIMDF_OP(Gt, "gt")
- CASE_SIMDF_OP(Ge, "ge")
- CASE_SIMDF_OP(Abs, "abs")
- CASE_F32x4_OP(AddHoriz, "add_horizontal")
- CASE_F32x4_OP(RecipApprox, "recip_approx")
- CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
- CASE_SIMDF_OP(Min, "min")
- CASE_SIMDF_OP(Max, "max")
- CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I32x4, I16x8High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I32x4, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16Low, "i32", "convert")
- CASE_CONVERT_OP(Convert, I16x8, I8x16High, "i32", "convert")
- CASE_CONVERT_OP(Convert, I8x16, I16x8, "i32", "convert")
- CASE_SIMDF_OP(ExtractLane, "extract_lane")
- CASE_SIMDF_OP(ReplaceLane, "replace_lane")
- CASE_I64x2_OP(ExtractLane, "extract_lane")
- CASE_I64x2_OP(ReplaceLane, "replace_lane")
- CASE_I32x4_OP(ExtractLane, "extract_lane")
- CASE_SIGN_OP(I16x8, ExtractLane, "extract_lane")
- CASE_SIGN_OP(I8x16, ExtractLane, "extract_lane")
- CASE_SIMDI_OP(ReplaceLane, "replace_lane")
- CASE_SIGN_OP(SIMDI, Min, "min")
- CASE_SIGN_OP(I64x2, Min, "min")
- CASE_SIGN_OP(SIMDI, Max, "max")
- CASE_SIGN_OP(I64x2, Max, "max")
- CASE_SIGN_OP(SIMDI, Lt, "lt")
- CASE_SIGN_OP(I64x2, Lt, "lt")
- CASE_SIGN_OP(SIMDI, Le, "le")
- CASE_SIGN_OP(I64x2, Le, "le")
- CASE_SIGN_OP(SIMDI, Gt, "gt")
- CASE_SIGN_OP(I64x2, Gt, "gt")
- CASE_SIGN_OP(SIMDI, Ge, "ge")
- CASE_SIGN_OP(I64x2, Ge, "ge")
- CASE_SIGN_OP(SIMDI, Shr, "shr")
- CASE_SIGN_OP(I64x2, Shr, "shr")
- CASE_SIMDI_OP(Shl, "shl")
- CASE_I64x2_OP(Shl, "shl")
- CASE_I32x4_OP(AddHoriz, "add_horizontal")
- CASE_I16x8_OP(AddHoriz, "add_horizontal")
- CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
- CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
- CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
- CASE_S128_OP(And, "and")
- CASE_S128_OP(Or, "or")
- CASE_S128_OP(Xor, "xor")
- CASE_S128_OP(Not, "not")
- CASE_S128_OP(Select, "select")
- CASE_S128_OP(AndNot, "andnot")
- CASE_S8x16_OP(Swizzle, "swizzle")
- CASE_S8x16_OP(Shuffle, "shuffle")
- CASE_S1x2_OP(AnyTrue, "any_true")
- CASE_S1x2_OP(AllTrue, "all_true")
- CASE_S1x4_OP(AnyTrue, "any_true")
- CASE_S1x4_OP(AllTrue, "all_true")
- CASE_S1x8_OP(AnyTrue, "any_true")
- CASE_S1x8_OP(AllTrue, "all_true")
- CASE_S1x16_OP(AnyTrue, "any_true")
- CASE_S1x16_OP(AllTrue, "all_true")
- CASE_SIMDF_OP(Qfma, "qfma")
- CASE_SIMDF_OP(Qfms, "qfms")
-
- CASE_S8x16_OP(LoadSplat, "load_splat")
- CASE_S16x8_OP(LoadSplat, "load_splat")
- CASE_S32x4_OP(LoadSplat, "load_splat")
- CASE_S64x2_OP(LoadSplat, "load_splat")
- CASE_I16x8_OP(Load8x8S, "load8x8_s")
- CASE_I16x8_OP(Load8x8U, "load8x8_u")
- CASE_I32x4_OP(Load16x4S, "load16x4_s")
- CASE_I32x4_OP(Load16x4U, "load16x4_u")
- CASE_I64x2_OP(Load32x2S, "load32x2_s")
- CASE_I64x2_OP(Load32x2U, "load32x2_u")
-
- CASE_I8x16_OP(RoundingAverageU, "avgr_u")
- CASE_I16x8_OP(RoundingAverageU, "avgr_u")
-
- CASE_I8x16_OP(Abs, "abs")
- CASE_I16x8_OP(Abs, "abs")
- CASE_I32x4_OP(Abs, "abs")
-
- CASE_I8x16_OP(BitMask, "bitmask")
- CASE_I16x8_OP(BitMask, "bitmask")
- CASE_I32x4_OP(BitMask, "bitmask")
-
- CASE_F32x4_OP(Pmin, "pmin")
- CASE_F32x4_OP(Pmax, "pmax")
- CASE_F64x2_OP(Pmin, "pmin")
- CASE_F64x2_OP(Pmax, "pmax")
-
- // Atomic operations.
- CASE_OP(AtomicNotify, "atomic.notify")
- CASE_INT_OP(AtomicWait, "atomic.wait")
- CASE_OP(AtomicFence, "atomic.fence")
- CASE_UNSIGNED_ALL_OP(AtomicLoad, "atomic.load")
- CASE_UNSIGNED_ALL_OP(AtomicStore, "atomic.store")
- CASE_UNSIGNED_ALL_OP(AtomicAdd, "atomic.add")
- CASE_UNSIGNED_ALL_OP(AtomicSub, "atomic.sub")
- CASE_UNSIGNED_ALL_OP(AtomicAnd, "atomic.and")
- CASE_UNSIGNED_ALL_OP(AtomicOr, "atomic.or")
- CASE_UNSIGNED_ALL_OP(AtomicXor, "atomic.xor")
- CASE_UNSIGNED_ALL_OP(AtomicExchange, "atomic.xchng")
- CASE_UNSIGNED_ALL_OP(AtomicCompareExchange, "atomic.cmpxchng")
-
- // GC operations.
- CASE_OP(StructNew, "struct.new")
- CASE_OP(StructNewSub, "struct.new_sub")
- CASE_OP(StructNewDefault, "struct.new_default")
- CASE_OP(StructGet, "struct.get")
- CASE_OP(StructGetS, "struct.get_s")
- CASE_OP(StructGetU, "struct.get_u")
- CASE_OP(StructSet, "struct.set")
- CASE_OP(ArrayNew, "array.new")
- CASE_OP(ArrayNewSub, "array.new_sub")
- CASE_OP(ArrayNewDefault, "array.new_default")
- CASE_OP(ArrayGet, "array.get")
- CASE_OP(ArrayGetS, "array.get_s")
- CASE_OP(ArrayGetU, "array.get_u")
- CASE_OP(ArrayLen, "array.len")
- CASE_OP(ArraySet, "array.set")
- CASE_OP(I31New, "i31.new")
- CASE_OP(I31GetS, "i31.get_s")
- CASE_OP(I31GetU, "i31.get_u")
- CASE_OP(RttGet, "rtt.get")
- CASE_OP(RttSub, "rtt.sub")
- CASE_OP(RefTest, "ref.test")
- CASE_OP(RefCast, "ref.cast")
- CASE_OP(BrOnCast, "br_on_cast")
- CASE_OP(RefEq, "ref.eq")
-
-
- case kNumericPrefix:
- case kSimdPrefix:
- case kAtomicPrefix:
- case kGCPrefix:
- return "unknown";
- // clang-format on
- }
- // Even though the switch above handles all well-defined enum values,
- // random modules (e.g. fuzzer generated) can call this function with
- // random (invalid) opcodes. Handle those here:
- return "invalid opcode";
-}
-
-#undef CASE_OP
-#undef CASE_I32_OP
-#undef CASE_I64_OP
-#undef CASE_F32_OP
-#undef CASE_F64_OP
-#undef CASE_REF_OP
-#undef CASE_F64x2_OP
-#undef CASE_F32x4_OP
-#undef CASE_I64x2_OP
-#undef CASE_I32x4_OP
-#undef CASE_I16x8_OP
-#undef CASE_I8x16_OP
-#undef CASE_S128_OP
-#undef CASE_S64x2_OP
-#undef CASE_S32x4_OP
-#undef CASE_S16x8_OP
-#undef CASE_S8x16_OP
-#undef CASE_S1x2_OP
-#undef CASE_S1x4_OP
-#undef CASE_S1x8_OP
-#undef CASE_S1x16_OP
-#undef CASE_INT_OP
-#undef CASE_FLOAT_OP
-#undef CASE_ALL_OP
-#undef CASE_SIMD_OP
-#undef CASE_SIMDI_OP
-#undef CASE_SIGN_OP
-#undef CASE_UNSIGNED_OP
-#undef CASE_UNSIGNED_ALL_OP
-#undef CASE_ALL_SIGN_OP
-#undef CASE_CONVERT_OP
-#undef CASE_CONVERT_SAT_OP
-#undef CASE_L32_OP
-#undef CASE_U32_OP
-
-bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_PREFIX(name, opcode) case k##name##Prefix:
- FOREACH_PREFIX(CHECK_PREFIX)
-#undef CHECK_PREFIX
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
- FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
-#undef CHECK_OPCODE
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
- switch (opcode) {
- case kExprUnreachable:
- case kExprBr:
- case kExprBrTable:
- case kExprReturn:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsBreakable(WasmOpcode opcode) {
- switch (opcode) {
- case kExprBlock:
- case kExprTry:
- case kExprCatch:
- case kExprLoop:
- case kExprElse:
- return false;
- default:
- return true;
- }
-}
-
-bool WasmOpcodes::IsAnyRefOpcode(WasmOpcode opcode) {
- switch (opcode) {
- case kExprRefNull:
- case kExprRefIsNull:
- case kExprRefFunc:
- case kExprRefAsNonNull:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsThrowingOpcode(WasmOpcode opcode) {
- // TODO(8729): Trapping opcodes are not yet considered to be throwing.
- switch (opcode) {
- case kExprThrow:
- case kExprRethrow:
- case kExprCallFunction:
- case kExprCallIndirect:
- return true;
- default:
- return false;
- }
-}
-
-bool WasmOpcodes::IsSimdPostMvpOpcode(WasmOpcode opcode) {
- switch (opcode) {
-#define CHECK_OPCODE(name, opcode, _) case kExpr##name:
- FOREACH_SIMD_POST_MVP_OPCODE(CHECK_OPCODE)
-#undef CHECK_OPCODE
- return true;
- default:
- return false;
- }
-}
-
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
@@ -528,95 +42,6 @@ bool IsJSCompatibleSignature(const FunctionSig* sig,
return true;
}
-namespace {
-
-#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
-enum WasmOpcodeSig : byte {
- kSigEnum_None,
- FOREACH_SIGNATURE(DECLARE_SIG_ENUM)
-};
-#undef DECLARE_SIG_ENUM
-#define DECLARE_SIG(name, ...) \
- constexpr ValueType kTypes_##name[] = {__VA_ARGS__}; \
- constexpr int kReturnsCount_##name = kTypes_##name[0] == kWasmStmt ? 0 : 1; \
- constexpr FunctionSig kSig_##name( \
- kReturnsCount_##name, static_cast<int>(arraysize(kTypes_##name)) - 1, \
- kTypes_##name + (1 - kReturnsCount_##name));
-FOREACH_SIGNATURE(DECLARE_SIG)
-#undef DECLARE_SIG
-
-#define DECLARE_SIG_ENTRY(name, ...) &kSig_##name,
-constexpr const FunctionSig* kCachedSigs[] = {
- nullptr, FOREACH_SIGNATURE(DECLARE_SIG_ENTRY)};
-#undef DECLARE_SIG_ENTRY
-
-constexpr WasmOpcodeSig GetShortOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- return FOREACH_SIMPLE_OPCODE(CASE) FOREACH_SIMPLE_PROTOTYPE_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetAsmJsOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == opc ? kSigEnum_##sig:
- return FOREACH_ASMJS_COMPAT_OPCODE(CASE) kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetSimdOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_SIMD_0_OPERAND_OPCODE(CASE) FOREACH_SIMD_MEM_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetAtomicOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_ATOMIC_OPCODE(CASE) FOREACH_ATOMIC_0_OPERAND_OPCODE(CASE)
- kSigEnum_None;
-#undef CASE
-}
-
-constexpr WasmOpcodeSig GetNumericOpcodeSigIndex(byte opcode) {
-#define CASE(name, opc, sig) opcode == (opc & 0xFF) ? kSigEnum_##sig:
- return FOREACH_NUMERIC_OPCODE(CASE) kSigEnum_None;
-#undef CASE
-}
-
-constexpr std::array<WasmOpcodeSig, 256> kShortSigTable =
- base::make_array<256>(GetShortOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kSimpleAsmjsExprSigTable =
- base::make_array<256>(GetAsmJsOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kSimdExprSigTable =
- base::make_array<256>(GetSimdOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kAtomicExprSigTable =
- base::make_array<256>(GetAtomicOpcodeSigIndex);
-constexpr std::array<WasmOpcodeSig, 256> kNumericExprSigTable =
- base::make_array<256>(GetNumericOpcodeSigIndex);
-
-} // namespace
-
-const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
- switch (opcode >> 8) {
- case 0:
- return kCachedSigs[kShortSigTable[opcode]];
- case kSimdPrefix:
- return kCachedSigs[kSimdExprSigTable[opcode & 0xFF]];
- case kAtomicPrefix:
- return kCachedSigs[kAtomicExprSigTable[opcode & 0xFF]];
- case kNumericPrefix:
- return kCachedSigs[kNumericExprSigTable[opcode & 0xFF]];
- default:
- UNREACHABLE(); // invalid prefix.
- return nullptr;
- }
-}
-
-const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
- DCHECK_GT(kSimpleAsmjsExprSigTable.size(), opcode);
- return kCachedSigs[kSimpleAsmjsExprSigTable[opcode]];
-}
-
// Define constexpr arrays.
constexpr uint8_t LoadType::kLoadSizeLog2[];
constexpr ValueType LoadType::kValueType[];
@@ -625,21 +50,6 @@ constexpr uint8_t StoreType::kStoreSizeLog2[];
constexpr ValueType StoreType::kValueType[];
constexpr MachineRepresentation StoreType::kMemRep[];
-MessageTemplate WasmOpcodes::TrapReasonToMessageId(TrapReason reason) {
- switch (reason) {
-#define TRAPREASON_TO_MESSAGE(name) \
- case k##name: \
- return MessageTemplate::kWasm##name;
- FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
-#undef TRAPREASON_TO_MESSAGE
- default:
- return MessageTemplate::kNone;
- }
-}
-
-const char* WasmOpcodes::TrapReasonMessage(TrapReason reason) {
- return MessageFormatter::TemplateString(TrapReasonToMessageId(reason));
-}
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 8a17b9984e8..4728ee76b0c 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -38,7 +38,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(BrIf, 0x0d, _) \
V(BrTable, 0x0e, _) \
V(Return, 0x0f, _) \
- V(BrOnNull, 0xd4, _) /* gc prototype */
+ V(Let, 0x17, _ /* gc prototype */) \
+ V(BrOnNull, 0xd4, _ /* gc prototype */)
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
@@ -61,6 +62,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(F32Const, 0x43, _) \
V(F64Const, 0x44, _) \
V(RefNull, 0xd0, _) \
+ V(RefIsNull, 0xd1, _) \
V(RefFunc, 0xd2, _) \
V(RefAsNonNull, 0xd3, _)
@@ -229,9 +231,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64SExtendI16, 0xc3, l_l) \
V(I64SExtendI32, 0xc4, l_l)
-#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) \
- V(RefIsNull, 0xd1, i_r) \
- V(RefEq, 0xd5, i_rr) // made-up opcode, guessing future spec (GC)
+#define FOREACH_SIMPLE_PROTOTYPE_OPCODE(V) V(RefEq, 0xd5, i_qq)
// For compatibility with Asm.js.
// These opcodes are not spec'ed (or visible) externally; the idea is
@@ -343,8 +343,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(S128Select, 0xfd52, s_sss) \
V(I8x16Abs, 0xfd60, s_s) \
V(I8x16Neg, 0xfd61, s_s) \
- V(S1x16AnyTrue, 0xfd62, i_s) \
- V(S1x16AllTrue, 0xfd63, i_s) \
+ V(V8x16AnyTrue, 0xfd62, i_s) \
+ V(V8x16AllTrue, 0xfd63, i_s) \
V(I8x16SConvertI16x8, 0xfd65, s_ss) \
V(I8x16UConvertI16x8, 0xfd66, s_ss) \
V(I8x16Shl, 0xfd6b, s_si) \
@@ -363,8 +363,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I8x16RoundingAverageU, 0xfd7b, s_ss) \
V(I16x8Abs, 0xfd80, s_s) \
V(I16x8Neg, 0xfd81, s_s) \
- V(S1x8AnyTrue, 0xfd82, i_s) \
- V(S1x8AllTrue, 0xfd83, i_s) \
+ V(V16x8AnyTrue, 0xfd82, i_s) \
+ V(V16x8AllTrue, 0xfd83, i_s) \
V(I16x8SConvertI32x4, 0xfd85, s_ss) \
V(I16x8UConvertI32x4, 0xfd86, s_ss) \
V(I16x8SConvertI8x16Low, 0xfd87, s_s) \
@@ -388,8 +388,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I16x8RoundingAverageU, 0xfd9b, s_ss) \
V(I32x4Abs, 0xfda0, s_s) \
V(I32x4Neg, 0xfda1, s_s) \
- V(S1x4AnyTrue, 0xfda2, i_s) \
- V(S1x4AllTrue, 0xfda3, i_s) \
+ V(V32x4AnyTrue, 0xfda2, i_s) \
+ V(V32x4AllTrue, 0xfda3, i_s) \
V(I32x4SConvertI16x8Low, 0xfda7, s_s) \
V(I32x4SConvertI16x8High, 0xfda8, s_s) \
V(I32x4UConvertI16x8Low, 0xfda9, s_s) \
@@ -439,8 +439,8 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I8x16BitMask, 0xfd64, i_s) \
V(I16x8BitMask, 0xfd84, i_s) \
V(I32x4BitMask, 0xfda4, i_s) \
- V(S1x2AnyTrue, 0xfdc2, i_s) \
- V(S1x2AllTrue, 0xfdc3, i_s) \
+ V(V64x2AnyTrue, 0xfdc2, i_s) \
+ V(V64x2AllTrue, 0xfdc3, i_s) \
V(I64x2Eq, 0xfdc0, s_ss) \
V(I64x2Ne, 0xfdc4, s_ss) \
V(I64x2LtS, 0xfdc5, s_ss) \
@@ -453,21 +453,30 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I64x2GeU, 0xfdd0, s_ss) \
V(I64x2MinS, 0xfdd6, s_ss) \
V(I64x2MinU, 0xfdd7, s_ss) \
- V(I64x2MaxS, 0xfdd8, s_ss) \
- V(I64x2MaxU, 0xfdd9, s_ss) \
+ V(I64x2MaxS, 0xfde2, s_ss) \
+ V(I64x2MaxU, 0xfdee, s_ss) \
V(F32x4Qfma, 0xfdfc, s_sss) \
V(F32x4Qfms, 0xfdfd, s_sss) \
V(F64x2Qfma, 0xfdfe, s_sss) \
V(F64x2Qfms, 0xfdff, s_sss) \
V(I16x8AddHoriz, 0xfdaf, s_ss) \
V(I32x4AddHoriz, 0xfdb0, s_ss) \
+ V(I32x4DotI16x8S, 0xfdba, s_ss) \
V(F32x4AddHoriz, 0xfdb2, s_ss) \
V(F32x4RecipApprox, 0xfdb3, s_s) \
- V(F32x4RecipSqrtApprox, 0xfdba, s_s) \
- V(F32x4Pmin, 0xfdda, s_ss) \
- V(F32x4Pmax, 0xfddb, s_ss) \
- V(F64x2Pmin, 0xfddc, s_ss) \
- V(F64x2Pmax, 0xfddd, s_ss)
+ V(F32x4RecipSqrtApprox, 0xfdbc, s_s) \
+ V(F32x4Pmin, 0xfdea, s_ss) \
+ V(F32x4Pmax, 0xfdeb, s_ss) \
+ V(F32x4Ceil, 0xfdd8, s_s) \
+ V(F32x4Floor, 0xfdd9, s_s) \
+ V(F32x4Trunc, 0xfdda, s_s) \
+ V(F32x4NearestInt, 0xfddb, s_s) \
+ V(F64x2Pmin, 0xfdf6, s_ss) \
+ V(F64x2Pmax, 0xfdf7, s_ss) \
+ V(F64x2Ceil, 0xfddc, s_s) \
+ V(F64x2Floor, 0xfddd, s_s) \
+ V(F64x2Trunc, 0xfdde, s_s) \
+ V(F64x2NearestInt, 0xfddf, s_s)
#define FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
V(I8x16ExtractLaneS, 0xfd15, _) \
@@ -495,25 +504,28 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
FOREACH_SIMD_1_OPERAND_1_PARAM_OPCODE(V) \
FOREACH_SIMD_1_OPERAND_2_PARAM_OPCODE(V)
-#define FOREACH_NUMERIC_OPCODE(V) \
- V(I32SConvertSatF32, 0xfc00, i_f) \
- V(I32UConvertSatF32, 0xfc01, i_f) \
- V(I32SConvertSatF64, 0xfc02, i_d) \
- V(I32UConvertSatF64, 0xfc03, i_d) \
- V(I64SConvertSatF32, 0xfc04, l_f) \
- V(I64UConvertSatF32, 0xfc05, l_f) \
- V(I64SConvertSatF64, 0xfc06, l_d) \
- V(I64UConvertSatF64, 0xfc07, l_d) \
- V(MemoryInit, 0xfc08, v_iii) \
- V(DataDrop, 0xfc09, v_v) \
- V(MemoryCopy, 0xfc0a, v_iii) \
- V(MemoryFill, 0xfc0b, v_iii) \
- V(TableInit, 0xfc0c, v_iii) \
- V(ElemDrop, 0xfc0d, v_v) \
- V(TableCopy, 0xfc0e, v_iii) \
- V(TableGrow, 0xfc0f, i_ai) \
- V(TableSize, 0xfc10, i_v) \
- /*TableFill is polymorph in the second parameter. It's anyref or funcref.*/ \
+#define FOREACH_NUMERIC_OPCODE(V) \
+ V(I32SConvertSatF32, 0xfc00, i_f) \
+ V(I32UConvertSatF32, 0xfc01, i_f) \
+ V(I32SConvertSatF64, 0xfc02, i_d) \
+ V(I32UConvertSatF64, 0xfc03, i_d) \
+ V(I64SConvertSatF32, 0xfc04, l_f) \
+ V(I64UConvertSatF32, 0xfc05, l_f) \
+ V(I64SConvertSatF64, 0xfc06, l_d) \
+ V(I64UConvertSatF64, 0xfc07, l_d) \
+ V(MemoryInit, 0xfc08, v_iii) \
+ V(DataDrop, 0xfc09, v_v) \
+ V(MemoryCopy, 0xfc0a, v_iii) \
+ V(MemoryFill, 0xfc0b, v_iii) \
+ V(TableInit, 0xfc0c, v_iii) \
+ V(ElemDrop, 0xfc0d, v_v) \
+ V(TableCopy, 0xfc0e, v_iii) \
+ /* TableGrow is polymorphic in the first parameter. */ \
+ /* It's whatever the table type is. */ \
+ V(TableGrow, 0xfc0f, i_ci) \
+ V(TableSize, 0xfc10, i_v) \
+ /* TableFill is polymorphic in the second parameter. */ \
+ /* It's whatever the table type is. */ \
V(TableFill, 0xfc11, v_iii)
#define FOREACH_ATOMIC_OPCODE(V) \
@@ -605,7 +617,7 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(I31New, 0xfb20, _) \
V(I31GetS, 0xfb21, _) \
V(I31GetU, 0xfb22, _) \
- V(RttGet, 0xfb30, _) \
+ V(RttCanon, 0xfb30, _) \
V(RttSub, 0xfb31, _) \
V(RefTest, 0xfb40, _) \
V(RefCast, 0xfb41, _) \
@@ -674,9 +686,9 @@ bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmFeatures&);
V(l_ill, kWasmI64, kWasmI32, kWasmI64, kWasmI64) \
V(i_iil, kWasmI32, kWasmI32, kWasmI32, kWasmI64) \
V(i_ill, kWasmI32, kWasmI32, kWasmI64, kWasmI64) \
- V(i_r, kWasmI32, kWasmAnyRef) \
- V(i_ai, kWasmI32, kWasmFuncRef, kWasmI32) \
- V(i_rr, kWasmI32, kWasmEqRef, kWasmEqRef)
+ V(i_e, kWasmI32, kWasmExternRef) \
+ V(i_ci, kWasmI32, kWasmFuncRef, kWasmI32) \
+ V(i_qq, kWasmI32, kWasmEqRef, kWasmEqRef)
#define FOREACH_SIMD_SIGNATURE(V) \
V(s_s, kWasmS128, kWasmS128) \
@@ -716,21 +728,21 @@ enum TrapReason {
// A collection of opcode-related static methods.
class V8_EXPORT_PRIVATE WasmOpcodes {
public:
- static const char* OpcodeName(WasmOpcode);
- static const FunctionSig* Signature(WasmOpcode);
- static const FunctionSig* AsmjsSignature(WasmOpcode);
- static bool IsPrefixOpcode(WasmOpcode);
- static bool IsControlOpcode(WasmOpcode);
- static bool IsAnyRefOpcode(WasmOpcode);
- static bool IsThrowingOpcode(WasmOpcode);
- static bool IsSimdPostMvpOpcode(WasmOpcode);
+ static constexpr const char* OpcodeName(WasmOpcode);
+ static constexpr const FunctionSig* Signature(WasmOpcode);
+ static constexpr const FunctionSig* AsmjsSignature(WasmOpcode);
+ static constexpr bool IsPrefixOpcode(WasmOpcode);
+ static constexpr bool IsControlOpcode(WasmOpcode);
+ static constexpr bool IsExternRefOpcode(WasmOpcode);
+ static constexpr bool IsThrowingOpcode(WasmOpcode);
+ static constexpr bool IsSimdPostMvpOpcode(WasmOpcode);
// Check whether the given opcode always jumps, i.e. all instructions after
// this one in the current block are dead. Returns false for |end|.
- static bool IsUnconditionalJump(WasmOpcode);
- static bool IsBreakable(WasmOpcode);
+ static constexpr bool IsUnconditionalJump(WasmOpcode);
+ static constexpr bool IsBreakable(WasmOpcode);
- static MessageTemplate TrapReasonToMessageId(TrapReason);
- static const char* TrapReasonMessage(TrapReason);
+ static constexpr MessageTemplate TrapReasonToMessageId(TrapReason);
+ static inline const char* TrapReasonMessage(TrapReason);
};
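With the definitions moved to wasm-opcodes-inl.h and marked constexpr, these predicates can be folded at compile time. A hedged usage sketch, assuming the usual enum constants from this header are in scope and the toolchain provides C++14 constexpr:

  static_assert(WasmOpcodes::IsPrefixOpcode(kSimdPrefix), "0xfd introduces SIMD opcodes");
  static_assert(WasmOpcodes::IsUnconditionalJump(kExprReturn), "code after return is dead");
  static_assert(!WasmOpcodes::IsBreakable(kExprLoop), "br targets a loop's start, not its end");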
// Representation of an initializer expression.
@@ -760,13 +772,16 @@ struct WasmInitExpr {
explicit WasmInitExpr(int64_t v) : kind(kI64Const) { val.i64_const = v; }
explicit WasmInitExpr(float v) : kind(kF32Const) { val.f32_const = v; }
explicit WasmInitExpr(double v) : kind(kF64Const) { val.f64_const = v; }
+
+ explicit WasmInitExpr(WasmInitKind kind) : kind(kind) {
+ DCHECK_EQ(kind, kRefNullConst);
+ }
+
WasmInitExpr(WasmInitKind kind, uint32_t index) : kind(kind) {
if (kind == kGlobalIndex) {
val.global_index = index;
} else if (kind == kRefFuncConst) {
val.function_index = index;
- } else if (kind == kRefNullConst) {
- // Nothing to do.
} else {
// For the other types, the other initializers should be used.
UNREACHABLE();
diff --git a/chromium/v8/src/wasm/wasm-subtyping.cc b/chromium/v8/src/wasm/wasm-subtyping.cc
new file mode 100644
index 00000000000..6be554b24c7
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-subtyping.cc
@@ -0,0 +1,167 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-subtyping.h"
+
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace {
+
+bool IsEquivalent(ValueType type1, ValueType type2, const WasmModule* module);
+
+bool IsArrayTypeEquivalent(uint32_t type_index_1, uint32_t type_index_2,
+ const WasmModule* module) {
+ if (module->type_kinds[type_index_1] != kWasmArrayTypeCode ||
+ module->type_kinds[type_index_2] != kWasmArrayTypeCode) {
+ return false;
+ }
+
+ const ArrayType* sub_array = module->types[type_index_1].array_type;
+ const ArrayType* super_array = module->types[type_index_2].array_type;
+ if (sub_array->mutability() != super_array->mutability()) return false;
+
+ // Temporarily cache type equivalence for the recursive call.
+ module->cache_type_equivalence(type_index_1, type_index_2);
+ if (IsEquivalent(sub_array->element_type(), super_array->element_type(),
+ module)) {
+ return true;
+ } else {
+ module->uncache_type_equivalence(type_index_1, type_index_2);
+ // TODO(7748): Consider caching negative results as well.
+ return false;
+ }
+}
+
+bool IsStructTypeEquivalent(uint32_t type_index_1, uint32_t type_index_2,
+ const WasmModule* module) {
+ if (module->type_kinds[type_index_1] != kWasmStructTypeCode ||
+ module->type_kinds[type_index_2] != kWasmStructTypeCode) {
+ return false;
+ }
+ const StructType* sub_struct = module->types[type_index_1].struct_type;
+ const StructType* super_struct = module->types[type_index_2].struct_type;
+
+ if (sub_struct->field_count() != super_struct->field_count()) {
+ return false;
+ }
+
+ // Temporarily cache type equivalence for the recursive call.
+ module->cache_type_equivalence(type_index_1, type_index_2);
+ for (uint32_t i = 0; i < sub_struct->field_count(); i++) {
+ if (sub_struct->mutability(i) != super_struct->mutability(i) ||
+ !IsEquivalent(sub_struct->field(i), super_struct->field(i), module)) {
+ module->uncache_type_equivalence(type_index_1, type_index_2);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsEquivalent(ValueType type1, ValueType type2, const WasmModule* module) {
+ if (type1 == type2) return true;
+ if (type1.kind() != type2.kind()) return false;
+ // At this point, the types can only be both rtts, refs, or optrefs,
+ // but with different indexed types.
+
+ // Rtts need to have the same depth.
+ if (type1.has_depth() && type1.depth() != type2.depth()) return false;
+ // In all three cases, the indexed types have to be equivalent.
+ if (module->is_cached_equivalent_type(type1.ref_index(), type2.ref_index())) {
+ return true;
+ }
+ return IsArrayTypeEquivalent(type1.ref_index(), type2.ref_index(), module) ||
+ IsStructTypeEquivalent(type1.ref_index(), type2.ref_index(), module);
+}
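The cache_type_equivalence / uncache_type_equivalence dance above is what lets the check terminate on recursive type definitions: the pair under comparison is optimistically recorded as equivalent before recursing, and the assumption is withdrawn if the structural check fails. A standalone toy model of the idea (hypothetical data structures, not the WasmModule API):

  #include <cstdint>
  #include <set>
  #include <utility>
  #include <vector>

  struct ToyTypeGraph {
    // Each type i is "array of element_of[i]" -- a minimal recursive shape.
    std::vector<uint32_t> element_of;
    mutable std::set<std::pair<uint32_t, uint32_t>> assumed_equivalent;

    bool IsEquivalent(uint32_t a, uint32_t b) const {
      if (a == b) return true;
      if (assumed_equivalent.count({a, b})) return true;  // coinductive assumption
      assumed_equivalent.insert({a, b});                   // assume, then verify
      if (IsEquivalent(element_of[a], element_of[b])) return true;
      assumed_equivalent.erase({a, b});                    // verification failed
      return false;
    }
  };

  int main() {
    // Types 0 and 1 reference each other; so do 2 and 3. Without the assumption
    // cache, comparing 0 with 2 would recurse forever; with it, they compare equal.
    ToyTypeGraph g{{1, 0, 3, 2}, {}};
    return g.IsEquivalent(0, 2) ? 0 : 1;
  }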
+
+bool IsStructSubtype(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* module) {
+ if (module->type_kinds[subtype_index] != kWasmStructTypeCode ||
+ module->type_kinds[supertype_index] != kWasmStructTypeCode) {
+ return false;
+ }
+ const StructType* sub_struct = module->types[subtype_index].struct_type;
+ const StructType* super_struct = module->types[supertype_index].struct_type;
+
+ if (sub_struct->field_count() < super_struct->field_count()) {
+ return false;
+ }
+
+ module->cache_subtype(subtype_index, supertype_index);
+ for (uint32_t i = 0; i < super_struct->field_count(); i++) {
+ bool sub_mut = sub_struct->mutability(i);
+ bool super_mut = super_struct->mutability(i);
+ if (sub_mut != super_mut ||
+ (sub_mut &&
+ !IsEquivalent(sub_struct->field(i), super_struct->field(i), module)) ||
+ (!sub_mut &&
+ !IsSubtypeOf(sub_struct->field(i), super_struct->field(i), module))) {
+ module->uncache_subtype(subtype_index, supertype_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsArraySubtype(uint32_t subtype_index, uint32_t supertype_index,
+ const WasmModule* module) {
+ if (module->type_kinds[subtype_index] != kWasmArrayTypeCode ||
+ module->type_kinds[supertype_index] != kWasmArrayTypeCode) {
+ return false;
+ }
+ const ArrayType* sub_array = module->types[subtype_index].array_type;
+ const ArrayType* super_array = module->types[supertype_index].array_type;
+ bool sub_mut = sub_array->mutability();
+ bool super_mut = super_array->mutability();
+ module->cache_subtype(subtype_index, supertype_index);
+ if (sub_mut != super_mut ||
+ (sub_mut && !IsEquivalent(sub_array->element_type(),
+ super_array->element_type(), module)) ||
+ (!sub_mut && !IsSubtypeOf(sub_array->element_type(),
+ super_array->element_type(), module))) {
+ module->uncache_subtype(subtype_index, supertype_index);
+ return false;
+ } else {
+ return true;
+ }
+}
+} // namespace
+
+// TODO(7748): Extend this with function and any-heap subtyping.
+V8_EXPORT_PRIVATE bool IsSubtypeOfHeap(HeapType subtype, HeapType supertype,
+ const WasmModule* module) {
+ DCHECK(!module->has_signature(subtype) && !module->has_signature(supertype));
+ if (subtype == supertype) {
+ return true;
+ }
+ // eqref is a supertype of all reference types except funcref.
+ if (supertype == kHeapEq) {
+ return subtype != kHeapFunc;
+ }
+ // At the moment, generic heap types are not subtyping-related otherwise.
+ if (is_generic_heap_type(subtype) || is_generic_heap_type(supertype)) {
+ return false;
+ }
+
+ if (module->is_cached_subtype(subtype, supertype)) {
+ return true;
+ }
+ return IsStructSubtype(subtype, supertype, module) ||
+ IsArraySubtype(subtype, supertype, module);
+}
+
+// TODO(7748): Extend this with function subtyping.
+ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module) {
+ if (a == b) return a;
+ if (IsSubtypeOf(a, b, module)) return a;
+ if (IsSubtypeOf(b, a, module)) return b;
+ return kWasmBottom;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-subtyping.h b/chromium/v8/src/wasm/wasm-subtyping.h
new file mode 100644
index 00000000000..6edf52dd31b
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-subtyping.h
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_SUBTYPING_H_
+#define V8_WASM_WASM_SUBTYPING_H_
+
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct WasmModule;
+V8_EXPORT_PRIVATE bool IsSubtypeOfHeap(HeapType subtype, HeapType supertype,
+ const WasmModule* module);
+
+// The subtyping between value types is described by the following rules:
+// - All types are a supertype of bottom.
+// - All reference types, except funcref, are subtypes of eqref.
+// - optref(ht1) <: optref(ht2) iff ht1 <: ht2.
+// - ref(ht1) <: ref/optref(ht2) iff ht1 <: ht2.
+V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
+ const WasmModule* module) {
+ if (subtype == supertype) return true;
+ bool compatible_references = (subtype.kind() == ValueType::kRef &&
+ supertype.kind() == ValueType::kRef) ||
+ (subtype.kind() == ValueType::kRef &&
+ supertype.kind() == ValueType::kOptRef) ||
+ (subtype.kind() == ValueType::kOptRef &&
+ supertype.kind() == ValueType::kOptRef);
+ if (!compatible_references) return false;
+ return IsSubtypeOfHeap(subtype.heap_type(), supertype.heap_type(), module);
+}
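The kind check above encodes the nullability half of the rules listed in the comment: a non-null ref may flow into a ref or optref slot, while an optref may only flow into another optref. A toy model of just that rule (not the V8 ValueType API), with the heap-type comparison factored out:

  enum class RefKind { kRef, kOptRef };

  constexpr bool CompatibleReferenceKinds(RefKind sub, RefKind super) {
    // Only nullable-into-non-null is rejected; everything else defers to the
    // heap-type comparison.
    return !(sub == RefKind::kOptRef && super == RefKind::kRef);
  }

  static_assert(CompatibleReferenceKinds(RefKind::kRef, RefKind::kOptRef),
                "ref(ht) <: optref(ht)");
  static_assert(CompatibleReferenceKinds(RefKind::kOptRef, RefKind::kOptRef),
                "optref(ht) <: optref(ht)");
  static_assert(!CompatibleReferenceKinds(RefKind::kOptRef, RefKind::kRef),
                "a nullable reference cannot stand in for a non-null one");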
+
+ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_SUBTYPING_H_
diff --git a/chromium/v8/src/wasm/wasm-value.h b/chromium/v8/src/wasm/wasm-value.h
index 9a6f0ca7262..5189eb86768 100644
--- a/chromium/v8/src/wasm/wasm-value.h
+++ b/chromium/v8/src/wasm/wasm-value.h
@@ -63,7 +63,7 @@ class Simd128 {
V(f64, kWasmF64, double) \
V(f64_boxed, kWasmF64, Float64) \
V(s128, kWasmS128, Simd128) \
- V(anyref, kWasmAnyRef, Handle<Object>)
+ V(externref, kWasmExternRef, Handle<Object>)
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);